Search in sources :

Example 1 with OFEventWFMTopology

use of org.openkilda.wfm.topology.event.OFEventWFMTopology in project open-kilda by telstra.

In class OFEventWfmTest, method basicLinkDiscoveryTest:

/**
 * Exercises the basics of link discovery: feeds switch-up, port-up and
 * ISL-discovered events into an {@link OFELinkBolt} and verifies the number of
 * messages emitted to the topology-discovery Kafka topic, as counted by the
 * mocked output collector.
 */
@Test
@Ignore
public void basicLinkDiscoveryTest() throws IOException, ConfigurationException, CmdLineException {
    System.out.println("==> Starting BasicLinkDiscoveryTest");
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    String topoInputTopic = config.getKafkaTopoDiscoTopic();
    KeyValueState<String, Object> state = new InMemoryKeyValueState<>();
    initMocks(topoInputTopic);

    // Two endpoints forming a single ISL; serialized as the payload for the
    // "isl discovered" tuple injected below.
    List<PathNode> nodes = Arrays.asList(
            new PathNode("sw1", 1, 0, 10L),
            new PathNode("sw2", 2, 1, 10L));
    InfoData data = new IslInfoData(10L, nodes, 10000L, IslChangeType.DISCOVERED, 9000L);
    String islDiscovered = MAPPER.writeValueAsString(data);

    OFELinkBolt linkBolt = new OFELinkBolt(config);
    linkBolt.prepare(stormConfig(), topologyContext, outputCollector);
    linkBolt.initState(state);

    // Configure the discovery filter to skip sw1/port 1, so only one ISL is
    // probed each discovery cycle.
    ArrayList<DiscoveryFilterEntity> skipNodes = new ArrayList<>(1);
    skipNodes.add(new DiscoveryFilterEntity("sw1", "1"));
    CommandMessage islFilterSetup = new CommandMessage(
            new DiscoveryFilterPopulateData(skipNodes), 1, "discovery-test",
            Destination.WFM_OF_DISCOVERY);
    String json = MAPPER.writeValueAsString(islFilterSetup);
    Tuple tuple = new TupleImpl(topologyContext, Collections.singletonList(json), 4, "message");
    linkBolt.execute(tuple);

    // Bring both switches up, then two ports on sw1.
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", OFEMessageUtils.SWITCH_UP), 0, topoInputTopic);
    linkBolt.execute(tuple);
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw2", OFEMessageUtils.SWITCH_UP), 0, topoInputTopic);
    linkBolt.execute(tuple);
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", "1", OFEMessageUtils.PORT_UP), 1, topoInputTopic);
    linkBolt.execute(tuple);
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", "2", OFEMessageUtils.PORT_UP), 1, topoInputTopic);
    linkBolt.execute(tuple);

    // Tick tuples drive the bolt's periodic discovery cycle.
    Tuple tickTuple = new TupleImpl(topologyContext, Collections.emptyList(), 2, Constants.SYSTEM_TICK_STREAM_ID);
    linkBolt.execute(tickTuple);
    tuple = new TupleImpl(topologyContext, Collections.singletonList(islDiscovered), 3, topoInputTopic);
    linkBolt.execute(tuple);
    linkBolt.execute(tickTuple);
    linkBolt.execute(tickTuple);

    // 1 isl, 3 second interval, 9 seconds test duration == 3 discovery commands
    // (only 1 isl per cycle because of the isl filter), originally expected 3.
    // TODO: (crimi) validate that 7 is correct; increased due to merged topics.
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(topoInputTopic);
    Assert.assertEquals(messagesExpected, messagesReceived);

    // "isl discovered" x1, originally expected 1. NOTE(review): no tuples are
    // executed between this checkpoint and the previous one, so this assertion
    // re-checks the same count.
    // TODO: (crimi) validate that 7 is correct; increased due to merged topics.
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(topoInputTopic);
    Assert.assertEquals(messagesExpected, messagesReceived);

    linkBolt.execute(tickTuple);
    // No new discovery commands expected after this tick, originally 3.
    // TODO: increased from 3 to 7 due to topic changes — confirm it.
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(topoInputTopic);
    Assert.assertEquals(messagesExpected, messagesReceived);

    // +1 discovery fails, originally 2.
    // TODO: there should be more, or we aren't looking in the right place.
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(topoInputTopic);
    Assert.assertEquals(messagesExpected, messagesReceived);
}
Also used : OFELinkBolt(org.openkilda.wfm.topology.event.OFELinkBolt) ArrayList(java.util.ArrayList) PathNode(org.openkilda.messaging.info.event.PathNode) InMemoryKeyValueState(org.apache.storm.state.InMemoryKeyValueState) CommandMessage(org.openkilda.messaging.command.CommandMessage) DiscoveryFilterEntity(org.openkilda.messaging.command.discovery.DiscoveryFilterEntity) OFEventWFMTopology(org.openkilda.wfm.topology.event.OFEventWFMTopology) IslInfoData(org.openkilda.messaging.info.event.IslInfoData) InfoData(org.openkilda.messaging.info.InfoData) IslInfoData(org.openkilda.messaging.info.event.IslInfoData) TupleImpl(org.apache.storm.tuple.TupleImpl) TopologyConfig(org.openkilda.wfm.topology.TopologyConfig) Tuple(org.apache.storm.tuple.Tuple) DiscoveryFilterPopulateData(org.openkilda.messaging.command.discovery.DiscoveryFilterPopulateData) Ignore(org.junit.Ignore) Test(org.junit.Test)

Example 2 with OFEventWFMTopology

use of org.openkilda.wfm.topology.event.OFEventWFMTopology in project open-kilda by telstra.

In class OFEventWfmTest, method setupOnce:

@BeforeClass
public static void setupOnce() throws Exception {
    // Base-class setup starts the shared local Storm cluster and test server.
    AbstractStormTest.setupOnce();

    // Point the discovery filter output at the test server's temp directory.
    Properties configOverlay = new Properties();
    configOverlay.setProperty("filter.directory", server.tempDir.getAbsolutePath());
    LaunchEnvironment launchEnvironment = makeLaunchEnvironment(configOverlay);

    // Submit the OFEvent WFM topology under test.
    manager = new OFEventWFMTopology(launchEnvironment);
    cluster.submitTopology(manager.makeTopologyName(), stormConfig(), manager.createTopology());

    // Submit a helper topology that dumps the topo-disco topic to a file.
    String topoDiscoTopic = manager.getConfig().getKafkaTopoDiscoTopic();
    discoFiler = new KafkaFilerTopology(launchEnvironment, topoDiscoTopic);
    cluster.submitTopology("utils-1", stormConfig(), discoFiler.createTopology());

    // Give both topologies time to spin up before any test runs.
    Utils.sleep(5 * 1000);
}
Also used : OFEventWFMTopology(org.openkilda.wfm.topology.event.OFEventWFMTopology) KafkaFilerTopology(org.openkilda.wfm.topology.utils.KafkaFilerTopology) Properties(java.util.Properties) BeforeClass(org.junit.BeforeClass)

Example 3 with OFEventWFMTopology

use of org.openkilda.wfm.topology.event.OFEventWFMTopology in project open-kilda by telstra.

In class OFEventWfmTest, method BasicSwitchPortEventsTest:

/**
 * Pushes switch-up / port-up / port-down messages through Kafka and verifies
 * the count of messages the disco-filer topology writes to its output file.
 */
@Test
@Ignore
public void BasicSwitchPortEventsTest() throws Exception {
    System.out.println("==> Starting BasicSwitchEventTest");
    // TODO: Is this test still valid, without the deprecated Switch/Port bolts?
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();

    String sw1Up = OFEMessageUtils.createSwitchDataMessage(OFEMessageUtils.SWITCH_UP, "sw1");
    String sw2Up = OFEMessageUtils.createSwitchDataMessage(OFEMessageUtils.SWITCH_UP, "sw2");
    String sw1p1Up = OFEMessageUtils.createPortDataMessage(OFEMessageUtils.PORT_UP, "sw1", "1");
    String sw2p2Up = OFEMessageUtils.createPortDataMessage(OFEMessageUtils.PORT_UP, "sw2", "2");
    String sw2p2Down = OFEMessageUtils.createPortDataMessage(OFEMessageUtils.PORT_DOWN, "sw2", "2");

    // Switch and port events share the same topology-discovery topic (the
    // original separate switch/port topics were merged).
    String discoTopic = config.getKafkaTopoDiscoTopic();

    // Send sw1 and sw2 up.
    kProducer.pushMessage(discoTopic, sw1Up);
    kProducer.pushMessage(discoTopic, sw2Up);
    // Send sw1/port1 up ... sw2/port2 up.
    kProducer.pushMessage(discoTopic, sw1p1Up);
    kProducer.pushMessage(discoTopic, sw2p2Up);
    // Send duplicates ... NB: at present, dupes aren't detected until we do FieldGrouping.
    // Probably should send duplicates in another test.
    kProducer.pushMessage(discoTopic, sw1Up);
    kProducer.pushMessage(discoTopic, sw2Up);
    kProducer.pushMessage(discoTopic, sw1p1Up);
    kProducer.pushMessage(discoTopic, sw2p2Up);
    Utils.sleep(4 * 1000);

    // At present, everything is passed through, no filter: 8 messages in, 8 out.
    messagesExpected = 8;
    messagesReceived = safeLinesCount(discoFiler.getFiler().getFile());
    Assert.assertEquals(messagesExpected, messagesReceived);

    Utils.sleep(1 * 1000);
    // Sending this now just for fun .. we'll more formally test that the ISL state is correct.
    kProducer.pushMessage(discoTopic, sw2p2Down);
    Utils.sleep(2 * 1000);

    // TODO: how can we programmatically determine how many ISL messages should be generated?
    messagesReceived = safeLinesCount(discoFiler.getFiler().getFile());
    if (messagesReceived == 0) {
        // Dump the filer output to aid debugging before the assertion fails.
        System.out.println("Message count failure; NO MESSAGES RECEIVED!");
        for (String s : Files.readLines(discoFiler.getFiler().getFile(), Charsets.UTF_8)) {
            System.out.println("\t\t > " + s);
        }
    }
    // NB: ISL discovery messages will be generated .. multiple .. at present 9-11.
    Assert.assertTrue(messagesReceived > 0);

    // Tear down the topologies submitted in setupOnce.
    cluster.killTopology(manager.makeTopologyName());
    cluster.killTopology("utils-1");
    Utils.sleep(4 * 1000);
}
Also used : OFEventWFMTopology(org.openkilda.wfm.topology.event.OFEventWFMTopology) TopologyConfig(org.openkilda.wfm.topology.TopologyConfig) Ignore(org.junit.Ignore) Test(org.junit.Test)

Aggregations

OFEventWFMTopology (org.openkilda.wfm.topology.event.OFEventWFMTopology)3 Ignore (org.junit.Ignore)2 Test (org.junit.Test)2 TopologyConfig (org.openkilda.wfm.topology.TopologyConfig)2 ArrayList (java.util.ArrayList)1 Properties (java.util.Properties)1 InMemoryKeyValueState (org.apache.storm.state.InMemoryKeyValueState)1 Tuple (org.apache.storm.tuple.Tuple)1 TupleImpl (org.apache.storm.tuple.TupleImpl)1 BeforeClass (org.junit.BeforeClass)1 CommandMessage (org.openkilda.messaging.command.CommandMessage)1 DiscoveryFilterEntity (org.openkilda.messaging.command.discovery.DiscoveryFilterEntity)1 DiscoveryFilterPopulateData (org.openkilda.messaging.command.discovery.DiscoveryFilterPopulateData)1 InfoData (org.openkilda.messaging.info.InfoData)1 IslInfoData (org.openkilda.messaging.info.event.IslInfoData)1 PathNode (org.openkilda.messaging.info.event.PathNode)1 OFELinkBolt (org.openkilda.wfm.topology.event.OFELinkBolt)1 KafkaFilerTopology (org.openkilda.wfm.topology.utils.KafkaFilerTopology)1