Use of org.openkilda.messaging.info.InfoData in project open-kilda by telstra.
From the class CacheBolt, the method doWork:
/**
 * {@inheritDoc}
 */
@Override
public void doWork(Tuple tuple) {
    if (CtrlAction.boltHandlerEntrance(this, tuple)) {
        return;
    }
    logger.trace("State before: {}", state);
    String json = tuple.getString(0);
    String source = tuple.getSourceComponent();
    // TODO: Eliminate the inefficiency introduced through the hack
    try {
        logger.info("Received cache data={}", tuple);
        BaseMessage bm = MAPPER.readValue(json, BaseMessage.class);
        if (bm instanceof InfoMessage) {
            InfoMessage message = (InfoMessage) bm;
            InfoData data = message.getData();
            if (data instanceof NetworkInfoData) {
                logger.debug("Storage content message {}", json);
                handleNetworkDump(data, tuple);
                isReceivedCacheInfo = true;
            } else if (!isReceivedCacheInfo) {
                // Until the initial network dump arrives, every other event is skipped.
                logger.debug("Cache message skipped because the bolt is not initialized: "
                        + "component={}, stream={}, tuple={}",
                        tuple.getSourceComponent(), tuple.getSourceStreamId(), tuple);
            } else if (data instanceof SwitchInfoData) {
                logger.info("Cache update switch info data: {}", data);
                handleSwitchEvent((SwitchInfoData) data, tuple);
            } else if (data instanceof IslInfoData) {
                logger.info("Cache update isl info data: {}", data);
                handleIslEvent((IslInfoData) data, tuple);
            } else if (data instanceof PortInfoData) {
                logger.info("Cache update port info data: {}", data);
                handlePortEvent((PortInfoData) data, tuple);
            } else if (data instanceof FlowInfoData) {
                logger.info("Cache update flow data: {}", data);
                FlowInfoData flowData = (FlowInfoData) data;
                handleFlowEvent(flowData, tuple);
            } else if (data instanceof NetworkTopologyChange) {
                logger.info("Switch flows reroute request");
                NetworkTopologyChange topologyChange = (NetworkTopologyChange) data;
                handleNetworkTopologyChangeEvent(topologyChange, tuple);
            } else {
                logger.error("Skip undefined info data type {}", json);
            }
        } else {
            logger.error("Skip undefined message type {}", json);
        }
    } catch (CacheException exception) {
        logger.error("Could not process message {}", tuple, exception);
    } catch (IOException exception) {
        logger.error("Could not deserialize message {}", tuple, exception);
    } finally {
        // Ack only once the initial network dump has been consumed;
        // otherwise fail the tuple so it is replayed later.
        if (isReceivedCacheInfo) {
            outputCollector.ack(tuple);
        } else {
            outputCollector.fail(tuple);
        }
    }
    logger.trace("State after: {}", state);
}
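The pattern above, deserializing to the base message type and then branching on the concrete InfoData subtype, relies on Jackson's polymorphic type handling. A minimal, self-contained sketch of the same idea, using hypothetical stand-in classes rather than the real open-kilda message hierarchy:

    import com.fasterxml.jackson.annotation.JsonSubTypes;
    import com.fasterxml.jackson.annotation.JsonTypeInfo;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class DispatchSketch {
        // Stand-in hierarchy; the real messaging classes are annotated in a similar spirit.
        @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "clazz")
        @JsonSubTypes({@JsonSubTypes.Type(value = SwitchEvent.class, name = "switch")})
        interface Event {}

        static class SwitchEvent implements Event {
            public String switchId;
        }

        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            String json = "{\"clazz\": \"switch\", \"switchId\": \"sw1\"}";
            // Jackson resolves the concrete subtype from the "clazz" property.
            Event event = mapper.readValue(json, Event.class);
            // Branch on the concrete type, as CacheBolt.doWork does with InfoData.
            if (event instanceof SwitchEvent) {
                System.out.println("switch event: " + ((SwitchEvent) event).switchId);
            } else {
                System.err.println("skip undefined event type: " + json);
            }
        }
    }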
Use of org.openkilda.messaging.info.InfoData in project open-kilda by telstra.
From the class SplitterBolt, the method execute:
/**
 * {@inheritDoc}
 */
@Override
public void execute(Tuple tuple) {
    String request = tuple.getString(0);
    Values values = new Values(request);
    try {
        Message message = tryMessage(request);
        if (message == null
                || !Destination.WFM.equals(message.getDestination())
                || !(message instanceof CommandMessage || message instanceof InfoMessage)) {
            return;
        }
        logger.debug("Request tuple={}", tuple);
        /*
         * First, try to see if this is a PUSH / UNPUSH (smaller code base vs other).
         * NB: InfoMessage was used since it has the relevant attributes/properties for
         * pushing the flow.
         */
        if (message instanceof InfoMessage) {
            InfoData data = ((InfoMessage) message).getData();
            if (data instanceof FlowInfoData) {
                FlowInfoData fid = (FlowInfoData) data;
                String flowId = fid.getFlowId();
                values = new Values(message, flowId);
                logger.info("Flow {} message: operation={} values={}", flowId, fid.getOperation(), values);
                if (fid.getOperation() == FlowOperation.PUSH) {
                    outputCollector.emit(StreamType.PUSH.toString(), tuple, values);
                } else if (fid.getOperation() == FlowOperation.UNPUSH) {
                    outputCollector.emit(StreamType.UNPUSH.toString(), tuple, values);
                } else {
                    logger.warn("Skip undefined FlowInfoData operation {}: {}={}",
                            fid.getOperation(), Utils.CORRELATION_ID, message.getCorrelationId());
                }
            } else {
                logger.warn("Skip undefined InfoMessage: {}={}", Utils.CORRELATION_ID, message.getCorrelationId());
            }
            return;
        }
        /*
         * Second, it isn't an InfoMessage, so it must be a CommandMessage.
         */
        CommandData data = ((CommandMessage) message).getData();
        if (data instanceof FlowCreateRequest) {
            String flowId = ((FlowCreateRequest) data).getPayload().getFlowId();
            logger.info("Flow {} create message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.CREATE.toString(), tuple, values);
        } else if (data instanceof FlowDeleteRequest) {
            String flowId = ((FlowDeleteRequest) data).getPayload().getFlowId();
            logger.info("Flow {} delete message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.DELETE.toString(), tuple, values);
        } else if (data instanceof FlowUpdateRequest) {
            String flowId = ((FlowUpdateRequest) data).getPayload().getFlowId();
            logger.info("Flow {} update message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.UPDATE.toString(), tuple, values);
        } else if (data instanceof FlowRestoreRequest) {
            String flowId = ((FlowRestoreRequest) data).getPayload().getLeft().getFlowId();
            logger.info("Flow {} restore message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.RESTORE.toString(), tuple, values);
        } else if (data instanceof FlowRerouteRequest) {
            String flowId = ((FlowRerouteRequest) data).getPayload().getFlowId();
            logger.info("Flow {} reroute message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.REROUTE.toString(), tuple, values);
        } else if (data instanceof FlowStatusRequest) {
            String flowId = ((FlowStatusRequest) data).getPayload().getId();
            logger.info("Flow {} status message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.STATUS.toString(), tuple, values);
        } else if (data instanceof FlowGetRequest) {
            String flowId = ((FlowGetRequest) data).getPayload().getId();
            logger.info("Flow {} get message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.READ.toString(), tuple, values);
        } else if (data instanceof FlowsGetRequest) {
            logger.info("Flows get message: values={}", values);
            values = new Values(message, null);
            outputCollector.emit(StreamType.READ.toString(), tuple, values);
        } else if (data instanceof FlowPathRequest) {
            String flowId = ((FlowPathRequest) data).getPayload().getId();
            logger.info("Flow {} path message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.PATH.toString(), tuple, values);
        } else if (data instanceof FlowCacheSyncRequest) {
            logger.info("FlowCacheSyncRequest: values={}", values);
            values = new Values(message, null);
            outputCollector.emit(StreamType.CACHE_SYNC.toString(), tuple, values);
        } else {
            logger.debug("Skip undefined CommandMessage: {}={}", Utils.CORRELATION_ID, message.getCorrelationId());
        }
        /*
         * (crimi) This was commented out since the parsing of the message is handled in tryMessage.
         * Due to the refactoring of the kafka topics, it appears more messages are coming to the
         * splitter than originally designed for.
         *
         * TODO: Fix the cause of excess messages coming to the splitter.
         */
        //
        // } catch (IOException exception) {
        //     String message = String.format("Could not deserialize message: %s", request);
        //     logger.error("{}", message, exception);
        //
        //     ErrorMessage errorMessage = new ErrorMessage(
        //             new ErrorData(ErrorType.REQUEST_INVALID, message, exception.getMessage()),
        //             System.currentTimeMillis(), Utils.SYSTEM_CORRELATION_ID, Destination.NORTHBOUND);
        //
        //     values = new Values(errorMessage, ErrorType.INTERNAL_ERROR);
        //     outputCollector.emit(StreamType.ERROR.toString(), tuple, values);
    } finally {
        logger.debug("Splitter message ack: component={}, stream={}, tuple={}, values={}",
                tuple.getSourceComponent(), tuple.getSourceStreamId(), tuple, values);
        outputCollector.ack(tuple);
    }
}
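Every branch emits to a named Storm stream, so the bolt must also declare each of those streams in declareOutputFields. The project's actual declaration is not shown here; a plausible sketch, assuming each stream carries the (message, flow-id) pair built by the Values calls above (the field names are an assumption):

    import org.apache.storm.topology.OutputFieldsDeclarer;
    import org.apache.storm.tuple.Fields;

    // Hypothetical sketch: declare one named stream per StreamType the bolt emits on,
    // each carrying the (message, flow-id) pair assembled in execute().
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        for (StreamType stream : new StreamType[] {
                StreamType.PUSH, StreamType.UNPUSH, StreamType.CREATE, StreamType.DELETE,
                StreamType.UPDATE, StreamType.RESTORE, StreamType.REROUTE, StreamType.STATUS,
                StreamType.READ, StreamType.PATH, StreamType.CACHE_SYNC}) {
            declarer.declareStream(stream.toString(), new Fields("message", "flow-id"));
        }
    }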
Use of org.openkilda.messaging.info.InfoData in project open-kilda by telstra.
From the class OFEventWfmTest, the method basicLinkDiscoveryTest:
/**
 * basicLinkDiscoveryTest exercises the basics of link discovery.
 * The key results should show up in a kafka topic, which is dumped to a file.
 */
@Test
@Ignore
public void basicLinkDiscoveryTest() throws IOException, ConfigurationException, CmdLineException {
    System.out.println("==> Starting BasicLinkDiscoveryTest");
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    String topo_input_topic = config.getKafkaTopoDiscoTopic();
    Tuple tuple;
    KeyValueState<String, Object> state = new InMemoryKeyValueState<>();
    initMocks(topo_input_topic);
    List<PathNode> nodes = Arrays.asList(
            new PathNode("sw1", 1, 0, 10L),
            new PathNode("sw2", 2, 1, 10L));
    InfoData data = new IslInfoData(10L, nodes, 10000L, IslChangeType.DISCOVERED, 9000L);
    String isl_discovered = MAPPER.writeValueAsString(data);
    OFELinkBolt linkBolt = new OFELinkBolt(config);
    linkBolt.prepare(stormConfig(), topologyContext, outputCollector);
    linkBolt.initState(state);
    ArrayList<DiscoveryFilterEntity> skipNodes = new ArrayList<>(1);
    skipNodes.add(new DiscoveryFilterEntity("sw1", "1"));
    CommandMessage islFilterSetup = new CommandMessage(
            new DiscoveryFilterPopulateData(skipNodes), 1, "discovery-test", Destination.WFM_OF_DISCOVERY);
    String json = MAPPER.writeValueAsString(islFilterSetup);
    tuple = new TupleImpl(topologyContext, Collections.singletonList(json), 4, "message");
    linkBolt.execute(tuple);
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", OFEMessageUtils.SWITCH_UP), 0, topo_input_topic);
    linkBolt.execute(tuple);
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw2", OFEMessageUtils.SWITCH_UP), 0, topo_input_topic);
    linkBolt.execute(tuple);
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", "1", OFEMessageUtils.PORT_UP), 1, topo_input_topic);
    linkBolt.execute(tuple);
    tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", "2", OFEMessageUtils.PORT_UP), 1, topo_input_topic);
    linkBolt.execute(tuple);
    Tuple tickTuple = new TupleImpl(topologyContext, Collections.emptyList(), 2, Constants.SYSTEM_TICK_STREAM_ID);
    linkBolt.execute(tickTuple);
    tuple = new TupleImpl(topologyContext, Collections.singletonList(isl_discovered), 3, topo_input_topic);
    linkBolt.execute(tuple);
    linkBolt.execute(tickTuple);
    linkBolt.execute(tickTuple);
    // 1 isl, 3-second interval, 9-second test duration == 3 discovery commands.
    // There is only 1 isl each cycle because of the isl filter.
    // messagesExpected = 3;
    // TODO: (crimi) validate that 7 is correct, due to merged topics
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
    Assert.assertEquals(messagesExpected, messagesReceived);
    // "isl discovered" x1
    // messagesExpected = 1;
    // TODO: (crimi) validate that 7 is correct, due to merged topics
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
    Assert.assertEquals(messagesExpected, messagesReceived);
    linkBolt.execute(tickTuple);
    // No new discovery commands.
    // messagesExpected = 3;
    // TODO: increased from 3 to 7 due to topic changes .. confirm it
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
    Assert.assertEquals(messagesExpected, messagesReceived);
    // +1 discovery fails
    // messagesExpected = 2;
    // TODO: there should be more, or we aren't looking in the right place
    messagesExpected = 7;
    messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
    Assert.assertEquals(messagesExpected, messagesReceived);
}
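The test feeds the bolt the same wire format the topology consumes: an InfoData serialized with the shared MAPPER. A standalone round-trip sketch of that step follows; the package paths and the assumption that the messaging classes carry Jackson type info (as the CacheBolt example above implies) are taken from these examples, not verified project details:

    import static org.openkilda.messaging.Utils.MAPPER;

    import org.openkilda.messaging.info.InfoData;
    import org.openkilda.messaging.info.event.IslChangeType;
    import org.openkilda.messaging.info.event.IslInfoData;
    import org.openkilda.messaging.info.event.PathNode;

    import java.util.Arrays;
    import java.util.List;

    public class IslRoundTripSketch {
        public static void main(String[] args) throws Exception {
            // Same construction as in the test above.
            List<PathNode> nodes = Arrays.asList(
                    new PathNode("sw1", 1, 0, 10L),
                    new PathNode("sw2", 2, 1, 10L));
            InfoData data = new IslInfoData(10L, nodes, 10000L, IslChangeType.DISCOVERED, 9000L);

            // Serialize exactly as the test does, then read it back as the base type;
            // polymorphic type info should restore the concrete subtype.
            String json = MAPPER.writeValueAsString(data);
            InfoData parsed = MAPPER.readValue(json, InfoData.class);
            System.out.println(parsed instanceof IslInfoData); // expected: true
        }
    }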
Use of org.openkilda.messaging.info.InfoData in project open-kilda by telstra.
From the class FlowTopologyTest, the method getFlowPayload:
private ImmutablePair<Flow, Flow> getFlowPayload(InfoMessage message) {
    InfoData data = message.getData();
    FlowInfoData flow = (FlowInfoData) data;
    return flow.getPayload();
}
Use of org.openkilda.messaging.info.InfoData in project open-kilda by telstra.
From the class KafkaMessageConsumer, the method receive:
/**
 * Receives messages from the WorkFlowManager queue.
 *
 * @param record the message object instance
 */
@KafkaListener(topics = "kilda-test")
public void receive(final String record) {
    logger.debug("message received: {}", record);
    try {
        Message message = MAPPER.readValue(record, Message.class);
        if (message.getDestination() == null || Destination.TOPOLOGY_ENGINE.equals(message.getDestination())) {
            if (message instanceof CommandMessage) {
                CommandData data = ((CommandMessage) message).getData();
                if (data instanceof FlowCreateRequest) {
                    FlowPayload payload = ((FlowCreateRequest) data).getPayload();
                    logger.debug("FlowCreateRequest: {}", payload);
                    Set<CommandMessage> commands = flowService.createFlow(payload, message.getCorrelationId());
                    for (CommandMessage response : commands) {
                        kafkaMessageProducer.send(topic, response);
                    }
                    logger.debug("Response sent, {}={}", CORRELATION_ID, message.getCorrelationId());
                } else if (data instanceof FlowDeleteRequest) {
                    FlowIdStatusPayload payload = ((FlowDeleteRequest) data).getPayload();
                    logger.debug("FlowDeleteRequest: {}", payload);
                    Set<CommandMessage> commands = flowService.deleteFlow(payload, message.getCorrelationId());
                    for (CommandMessage response : commands) {
                        kafkaMessageProducer.send(topic, response);
                    }
                    logger.debug("Response sent, {}={}", CORRELATION_ID, message.getCorrelationId());
                } else if (data instanceof FlowUpdateRequest) {
                    FlowPayload payload = ((FlowUpdateRequest) data).getPayload();
                    logger.debug("FlowUpdateRequest: {}", payload);
                    Set<CommandMessage> commands = flowService.updateFlow(payload, message.getCorrelationId());
                    for (CommandMessage response : commands) {
                        kafkaMessageProducer.send(topic, response);
                    }
                    logger.debug("Response sent, {}={}", CORRELATION_ID, message.getCorrelationId());
                } else if (data instanceof FlowGetRequest) {
                    FlowIdStatusPayload payload = ((FlowGetRequest) data).getPayload();
                    logger.debug("FlowGetRequest: {}", payload);
                    InfoMessage response = flowService.getFlow(payload, message.getCorrelationId());
                    kafkaMessageProducer.send(topic, response);
                    logger.debug("Response sent, {}={}", CORRELATION_ID, message.getCorrelationId());
                } else if (data instanceof FlowsGetRequest) {
                    FlowIdStatusPayload payload = ((FlowsGetRequest) data).getPayload();
                    logger.debug("FlowsGetRequest: {}", payload);
                    InfoMessage response = flowService.getFlows(payload, message.getCorrelationId());
                    kafkaMessageProducer.send(topic, response);
                    logger.debug("Response sent, {}={}", CORRELATION_ID, message.getCorrelationId());
                } else if (data instanceof FlowPathRequest) {
                    FlowIdStatusPayload payload = ((FlowPathRequest) data).getPayload();
                    logger.debug("FlowPathRequest: {}", payload);
                    InfoMessage response = flowService.pathFlow(payload, message.getCorrelationId());
                    kafkaMessageProducer.send(topic, response);
                    logger.debug("Response sent, {}={}", CORRELATION_ID, message.getCorrelationId());
                } else {
                    logger.error("Unexpected command message data type: {}", data);
                }
            } else if (message instanceof InfoMessage) {
                InfoData data = ((InfoMessage) message).getData();
                if (data instanceof SwitchInfoData) {
                    SwitchInfoData payload = (SwitchInfoData) data;
                    switch (payload.getState()) {
                        case ADDED:
                            switchService.add(payload);
                            break;
                        case ACTIVATED:
                            switchService.activate(payload);
                            break;
                        case DEACTIVATED:
                            switchService.deactivate(payload);
                            break;
                        case REMOVED:
                            switchService.remove(payload);
                            break;
                        case CHANGED:
                        default:
                            break;
                    }
                } else if (data instanceof IslInfoData) {
                    IslInfoData payload = (IslInfoData) data;
                    islService.discoverLink(payload);
                } else {
                    logger.debug("Unexpected info message data type: {}", data);
                }
            }
        } else {
            logger.debug("Skip message: {}", message);
        }
    } catch (IOException exception) {
        logger.error("Could not deserialize message: {}", record, exception);
    }
}
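The create, delete, and update branches repeat the same send-and-log loop. A small helper, offered here as a refactoring sketch rather than code from the project, would collapse that duplication:

    // Hypothetical helper: send every generated command, then log the correlation id once.
    private void sendAll(Set<CommandMessage> commands, String correlationId) {
        for (CommandMessage response : commands) {
            kafkaMessageProducer.send(topic, response);
        }
        logger.debug("Response sent, {}={}", CORRELATION_ID, correlationId);
    }

With it, the FlowCreateRequest branch reduces to sendAll(flowService.createFlow(payload, message.getCorrelationId()), message.getCorrelationId());.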