Use of org.openkilda.messaging.info.discovery.NetworkInfoData in the project open-kilda by telstra.
From the class OFELinkBoltFloodTest, method warmBoltOnHighLoadedTopic.
@Test(timeout = 5000 * 60)
public void warmBoltOnHighLoadedTopic() throws Exception {
    topology = new OFEventWFMTopology(makeLaunchEnvironment());

    // Consumer group id is derived deterministically from the destination name,
    // so repeated runs reuse the same consumer identity.
    teConsumer = new TestKafkaConsumer(
            topology.getConfig().getKafkaTopoEngTopic(),
            kafkaProperties(UUID.nameUUIDFromBytes(
                    Destination.TOPOLOGY_ENGINE.toString().getBytes()).toString()));
    teConsumer.start();

    // Number of messages pre-loaded into the topic before the bolt starts.
    final int floodSize = 100000;
    SwitchInfoData data = new SwitchInfoData(
            "switchId", SwitchState.ADDED, "address", "hostname", "description", "controller");
    InfoMessage message = new InfoMessage(data, System.currentTimeMillis(), UUID.randomUUID().toString());

    // Flooding: fill the topic so the bolt starts against a large backlog.
    sendMessages(message, topology.getConfig().getKafkaTopoDiscoTopic(), floodSize);

    StormTopology stormTopology = topology.createTopology();
    Config config = stormConfig();
    cluster.submitTopology(OFELinkBoltFloodTest.class.getSimpleName(), config, stormTopology);

    // Send the network dump message to the topic; it lands at offset floodSize + 1.
    NetworkInfoData dump = new NetworkInfoData(
            "test", Collections.emptySet(), Collections.emptySet(),
            Collections.emptySet(), Collections.emptySet());
    InfoMessage info = new InfoMessage(dump, 0, DEFAULT_CORRELATION_ID, Destination.WFM);
    String request = objectMapper.writeValueAsString(info);
    kProducer.pushMessage(topology.getConfig().getKafkaTopoDiscoTopic(), request);

    // Drain the TE topic until every flooded message has been forwarded.
    // The @Test timeout bounds this loop if messages are lost in transit.
    int polled = 0;
    while (polled < floodSize) {
        if (teConsumer.pollMessage() != null) {
            ++polled;
        }
    }
    assertEquals(floodSize, polled);
}
Use of org.openkilda.messaging.info.discovery.NetworkInfoData in the project open-kilda by telstra.
From the class OFELinkBoltTest, method cacheLoadCheck.
/**
 * Part of the warm-up mechanism checks. Verifies that the bolt correctly unpacks a
 * network dump received from FL and fills its inner cache accordingly.
 */
@Test
public void cacheLoadCheck() throws IOException {
    // Build a dump containing two switches, each exposing one port in UP state.
    SwitchInfoData firstSwitch = new SwitchInfoData(
            "sw1", SwitchState.ADDED, "127.0.0.1", "localhost", "test switch", "kilda");
    SwitchInfoData secondSwitch = new SwitchInfoData(
            "sw2", SwitchState.ADDED, "127.0.0.1", "localhost", "test switch", "kilda");
    PortInfoData firstPort = new PortInfoData(firstSwitch.getSwitchId(), 1, null, UP);
    PortInfoData secondPort = new PortInfoData(secondSwitch.getSwitchId(), 1, null, UP);

    NetworkInfoData networkDump = new NetworkInfoData(
            "test",
            new HashSet<>(Arrays.asList(firstSwitch, secondSwitch)),
            new HashSet<>(Arrays.asList(firstPort, secondPort)),
            Collections.emptySet(),
            Collections.emptySet());
    InfoMessage wrapped = new InfoMessage(networkDump, 0, DEFAULT_CORRELATION_ID, Destination.WFM);
    String payload = objectMapper.writeValueAsString(wrapped);

    // Feed the serialized dump into the bolt as a regular input tuple.
    Tuple inputTuple = new TupleImpl(context, new Values(payload), TASK_ID_BOLT, STREAM_ID_INPUT);
    bolt.doWork(inputTuple);

    // One discovery entry per switch is expected. The queue contents themselves are
    // not checked here — that belongs to the handleSwitchEvent / handlePortEvent tests.
    List<DiscoveryNode> discoveryQueue = bolt.getDiscoveryQueue();
    assertEquals(2, discoveryQueue.size());
}
Use of org.openkilda.messaging.info.discovery.NetworkInfoData in the project open-kilda by telstra.
From the class AbstractSerializerTest, method dumpNetworkResponseTest.
@Test
public void dumpNetworkResponseTest() throws IOException, ClassNotFoundException {
    // Dump containing two switches, one ISL, and one forward/reverse flow pair.
    NetworkInfoData data = new NetworkInfoData(
            requester,
            new HashSet<>(Arrays.asList(sw1, sw2)),
            new HashSet<>(),
            Collections.singleton(isl),
            Collections.singleton(new ImmutablePair<>(flowModel, flowModel)));
    // The constructor already carries the payload, so the former redundant
    // setData(data) call (and the debug println statements) were removed.
    InfoMessage info = new InfoMessage(data, System.currentTimeMillis(), CORRELATION_ID, DESTINATION);

    // Round-trip through the serializer under test.
    serialize(info);
    Message message = (Message) deserialize();
    assertTrue(message instanceof InfoMessage);

    InfoMessage resultInfo = (InfoMessage) message;
    assertTrue(resultInfo.getData() != null);

    // The deserialized payload must equal the original, and equal objects
    // must produce equal hash codes (equals/hashCode contract).
    NetworkInfoData resultData = (NetworkInfoData) resultInfo.getData();
    assertEquals(data, resultData);
    assertEquals(data.hashCode(), resultData.hashCode());
}
Aggregations