use of org.openkilda.messaging.info.flow.FlowInfoData in project open-kilda by telstra.
the class CacheBolt method doWork.
/**
 * {@inheritDoc}
 */
@Override
public void doWork(Tuple tuple) {
    if (CtrlAction.boltHandlerEntrance(this, tuple))
        return;
    logger.trace("State before: {}", state);
    String json = tuple.getString(0);
    String source = tuple.getSourceComponent();
    // TODO: Eliminate the inefficiency introduced through the hack
    try {
        logger.info("Received cache data={}", tuple);
        BaseMessage bm = MAPPER.readValue(json, BaseMessage.class);
        if (bm instanceof InfoMessage) {
            InfoMessage message = (InfoMessage) bm;
            InfoData data = message.getData();
            if (data instanceof NetworkInfoData) {
                logger.debug("Storage content message {}", json);
                handleNetworkDump(data, tuple);
                isReceivedCacheInfo = true;
            } else if (!isReceivedCacheInfo) {
                logger.debug("Cache message fail due bolt not initialized: "
                        + "component={}, stream={}, tuple={}",
                        tuple.getSourceComponent(), tuple.getSourceStreamId(), tuple);
            } else if (data instanceof SwitchInfoData) {
                logger.info("Cache update switch info data: {}", data);
                handleSwitchEvent((SwitchInfoData) data, tuple);
            } else if (data instanceof IslInfoData) {
                logger.info("Cache update isl info data: {}", data);
                handleIslEvent((IslInfoData) data, tuple);
            } else if (data instanceof PortInfoData) {
                logger.info("Cache update port info data: {}", data);
                handlePortEvent((PortInfoData) data, tuple);
            } else if (data instanceof FlowInfoData) {
                logger.info("Cache update flow data: {}", data);
                FlowInfoData flowData = (FlowInfoData) data;
                handleFlowEvent(flowData, tuple);
            } else if (data instanceof NetworkTopologyChange) {
                logger.info("Switch flows reroute request");
                NetworkTopologyChange topologyChange = (NetworkTopologyChange) data;
                handleNetworkTopologyChangeEvent(topologyChange, tuple);
            } else {
                logger.error("Skip undefined info data type {}", json);
            }
        } else {
            logger.error("Skip undefined message type {}", json);
        }
    } catch (CacheException exception) {
        logger.error("Could not process message {}", tuple, exception);
    } catch (IOException exception) {
        logger.error("Could not deserialize message {}", tuple, exception);
    } finally {
        if (isReceivedCacheInfo) {
            outputCollector.ack(tuple);
        } else {
            outputCollector.fail(tuple);
        }
    }
    logger.trace("State after: {}", state);
}
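The dispatch above reduces to three steps: deserialize the raw JSON into a BaseMessage, narrow it to an InfoMessage, then branch on the concrete InfoData type. A minimal sketch of the FlowInfoData branch in isolation follows; the helper class is hypothetical, a plain Jackson ObjectMapper stands in for the bolt's shared MAPPER, and the package paths of everything except FlowInfoData are assumptions.

import java.io.IOException;

import com.fasterxml.jackson.databind.ObjectMapper;

import org.openkilda.messaging.BaseMessage;
import org.openkilda.messaging.info.InfoMessage;
import org.openkilda.messaging.info.flow.FlowInfoData;

final class FlowDataExtractor {
    // Stand-in for the shared MAPPER that doWork uses.
    private static final ObjectMapper MAPPER = new ObjectMapper();

    /** Returns the FlowInfoData payload if the JSON carries one, otherwise null. */
    static FlowInfoData extractFlowData(String json) throws IOException {
        BaseMessage bm = MAPPER.readValue(json, BaseMessage.class);
        if (bm instanceof InfoMessage && ((InfoMessage) bm).getData() instanceof FlowInfoData) {
            return (FlowInfoData) ((InfoMessage) bm).getData();
        }
        // Every other payload type (SwitchInfoData, IslInfoData, ...) has its own branch in doWork.
        return null;
    }
}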
use of org.openkilda.messaging.info.flow.FlowInfoData in project open-kilda by telstra.
the class SplitterBolt method execute.
/**
 * {@inheritDoc}
 */
@Override
public void execute(Tuple tuple) {
    String request = tuple.getString(0);
    Values values = new Values(request);
    try {
        Message message = tryMessage(request);
        if (message == null
                || !Destination.WFM.equals(message.getDestination())
                || !(message instanceof CommandMessage || message instanceof InfoMessage)) {
            return;
        }
        logger.debug("Request tuple={}", tuple);
        /*
         * First, try to see if this is a PUSH / UNPUSH (smaller code base vs other).
         * NB: InfoMessage was used since it has the relevant attributes/properties for
         * pushing the flow.
         */
        if (message instanceof InfoMessage) {
            InfoData data = ((InfoMessage) message).getData();
            if (data instanceof FlowInfoData) {
                FlowInfoData fid = (FlowInfoData) data;
                String flowId = fid.getFlowId();
                values = new Values(message, flowId);
                logger.info("Flow {} message: operation={} values={}", flowId, fid.getOperation(), values);
                if (fid.getOperation() == FlowOperation.PUSH) {
                    outputCollector.emit(StreamType.PUSH.toString(), tuple, values);
                } else if (fid.getOperation() == FlowOperation.UNPUSH) {
                    outputCollector.emit(StreamType.UNPUSH.toString(), tuple, values);
                } else {
                    logger.warn("Skip undefined FlowInfoData Operation {}: {}={}",
                            fid.getOperation(), Utils.CORRELATION_ID, message.getCorrelationId());
                }
            } else {
                logger.warn("Skip undefined InfoMessage: {}={}", Utils.CORRELATION_ID, message.getCorrelationId());
            }
            return;
        }
        /*
         * Second, it isn't an InfoMessage, so it must be a CommandMessage.
         */
        CommandData data = ((CommandMessage) message).getData();
        if (data instanceof FlowCreateRequest) {
            String flowId = ((FlowCreateRequest) data).getPayload().getFlowId();
            logger.info("Flow {} create message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.CREATE.toString(), tuple, values);
        } else if (data instanceof FlowDeleteRequest) {
            String flowId = ((FlowDeleteRequest) data).getPayload().getFlowId();
            logger.info("Flow {} delete message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.DELETE.toString(), tuple, values);
        } else if (data instanceof FlowUpdateRequest) {
            String flowId = ((FlowUpdateRequest) data).getPayload().getFlowId();
            logger.info("Flow {} update message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.UPDATE.toString(), tuple, values);
        } else if (data instanceof FlowRestoreRequest) {
            String flowId = ((FlowRestoreRequest) data).getPayload().getLeft().getFlowId();
            logger.info("Flow {} restore message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.RESTORE.toString(), tuple, values);
        } else if (data instanceof FlowRerouteRequest) {
            String flowId = ((FlowRerouteRequest) data).getPayload().getFlowId();
            logger.info("Flow {} reroute message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.REROUTE.toString(), tuple, values);
        } else if (data instanceof FlowStatusRequest) {
            String flowId = ((FlowStatusRequest) data).getPayload().getId();
            logger.info("Flow {} status message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.STATUS.toString(), tuple, values);
        } else if (data instanceof FlowGetRequest) {
            String flowId = ((FlowGetRequest) data).getPayload().getId();
            logger.info("Flow {} get message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.READ.toString(), tuple, values);
        } else if (data instanceof FlowsGetRequest) {
            logger.info("Flows get message: values={}", values);
            values = new Values(message, null);
            outputCollector.emit(StreamType.READ.toString(), tuple, values);
        } else if (data instanceof FlowPathRequest) {
            String flowId = ((FlowPathRequest) data).getPayload().getId();
            logger.info("Flow {} path message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.PATH.toString(), tuple, values);
        } else if (data instanceof FlowCacheSyncRequest) {
            logger.info("FlowCacheSyncRequest: values={}", values);
            values = new Values(message, null);
            outputCollector.emit(StreamType.CACHE_SYNC.toString(), tuple, values);
        } else {
            logger.debug("Skip undefined CommandMessage: {}={}", Utils.CORRELATION_ID, message.getCorrelationId());
        }
        /*
         * (crimi) This was commented out since the parsing of the message is handled in tryMessage.
         * Due to refactoring the kafka topics, it appears more messages are coming to the splitter than
         * originally designed for.
         *
         * TODO: Fix the cause of excess messages coming to the splitter.
         */
        //
        // } catch (IOException exception) {
        //     String message = String.format("Could not deserialize message: %s", request);
        //     logger.error("{}", message, exception);
        //
        //     ErrorMessage errorMessage = new ErrorMessage(
        //             new ErrorData(ErrorType.REQUEST_INVALID, message, exception.getMessage()),
        //             System.currentTimeMillis(), Utils.SYSTEM_CORRELATION_ID, Destination.NORTHBOUND);
        //
        //     values = new Values(errorMessage, ErrorType.INTERNAL_ERROR);
        //     outputCollector.emit(StreamType.ERROR.toString(), tuple, values);
    } finally {
        logger.debug("Splitter message ack: component={}, stream={}, tuple={}, values={}",
                tuple.getSourceComponent(), tuple.getSourceStreamId(), tuple, values);
        outputCollector.ack(tuple);
    }
}
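In the PUSH/UNPUSH branch above, the FlowOperation carried by the FlowInfoData is what selects the output stream. A small sketch of that routing decision, kept separate from Storm for clarity; the router class is hypothetical and the FlowOperation package path is assumed, while getOperation() and getFlowId() are the accessors the splitter itself uses.

import org.openkilda.messaging.info.flow.FlowInfoData;
import org.openkilda.messaging.info.flow.FlowOperation; // package path assumed

final class FlowStreamRouter {
    private FlowStreamRouter() {
    }

    /**
     * Mirrors the PUSH/UNPUSH branch of SplitterBolt.execute: the operation on the
     * FlowInfoData decides the stream name, and anything else is logged and skipped.
     */
    static String streamFor(FlowInfoData fid) {
        if (fid.getOperation() == FlowOperation.PUSH) {
            return "PUSH";   // the splitter emits on StreamType.PUSH.toString()
        }
        if (fid.getOperation() == FlowOperation.UNPUSH) {
            return "UNPUSH"; // the splitter emits on StreamType.UNPUSH.toString()
        }
        return null;         // other operations fall through to the warning branch
    }
}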
use of org.openkilda.messaging.info.flow.FlowInfoData in project open-kilda by telstra.
the class CacheTopologyTest method flowShouldBeReroutedWhenIslDies.
@Ignore
@Test
public void flowShouldBeReroutedWhenIslDies() throws Exception {
    final String destSwitchId = "destSwitch";
    final String flowId = "flowId";
    sendData(sw);

    SwitchInfoData destSwitch = new SwitchInfoData(destSwitchId, SwitchState.ACTIVATED,
            StringUtils.EMPTY, StringUtils.EMPTY, StringUtils.EMPTY, StringUtils.EMPTY);
    sendData(destSwitch);

    List<PathNode> path = ImmutableList.of(
            new PathNode(sw.getSwitchId(), 0, 0),
            new PathNode(destSwitch.getSwitchId(), 0, 1));
    IslInfoData isl = new IslInfoData(0L, path, 0L, IslChangeType.DISCOVERED, 0L);
    sendData(isl);

    FlowInfoData flowData = buildFlowInfoData(flowId, sw.getSwitchId(), destSwitchId, path);
    sendData(flowData);

    // mark isl as failed
    flowConsumer.clear();
    isl.setState(IslChangeType.FAILED);
    sendData(isl);

    // we are expecting that flow should be rerouted
    ConsumerRecord<String, String> record = flowConsumer.pollMessage();
    assertNotNull(record);
    CommandMessage message = objectMapper.readValue(record.value(), CommandMessage.class);
    assertNotNull(message);
    FlowRerouteRequest command = (FlowRerouteRequest) message.getData();
    assertTrue(command.getPayload().getFlowId().equals(flowId));
}
use of org.openkilda.messaging.info.flow.FlowInfoData in project open-kilda by telstra.
the class CacheTopologyTest method buildFlowInfoData.
private FlowInfoData buildFlowInfoData(String flowId, String srcSwitch, String dstSwitch, List<PathNode> path) {
    Flow flow = new Flow();
    flow.setFlowId(flowId);
    flow.setSourceSwitch(srcSwitch);
    flow.setDestinationSwitch(dstSwitch);
    flow.setState(FlowState.UP);
    PathInfoData pathInfoData = new PathInfoData(0L, path);
    flow.setFlowPath(pathInfoData);
    ImmutablePair<Flow, Flow> immutablePair = new ImmutablePair<>(flow, flow);
    return new FlowInfoData(flowId, immutablePair, FlowOperation.CREATE, UUID.randomUUID().toString());
}
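The object this helper returns can be checked with the same accessors the bolts rely on (getFlowId, getOperation, getPayload). A sketch of a test-method body that could sit next to the helper in CacheTopologyTest, assuming JUnit's assertEquals and the bean getters implied by the setters above:

// Hypothetical check of the helper's output; getSourceSwitch() is assumed from setSourceSwitch().
FlowInfoData flowData = buildFlowInfoData("flowId", "srcSwitch", "destSwitch", path);
assertEquals("flowId", flowData.getFlowId());                                  // accessor used by SplitterBolt
assertEquals(FlowOperation.CREATE, flowData.getOperation());                   // operation set by the helper
assertEquals("srcSwitch", flowData.getPayload().getLeft().getSourceSwitch());  // forward flow of the pair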
use of org.openkilda.messaging.info.flow.FlowInfoData in project open-kilda by telstra.
the class FlowTopologyTest method getFlowPayload.
private ImmutablePair<Flow, Flow> getFlowPayload(InfoMessage message) {
    InfoData data = message.getData();
    FlowInfoData flow = (FlowInfoData) data;
    return flow.getPayload();
}
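A usage sketch for the helper above: unpacking the forward/reverse pair carried by a FlowInfoData message. getLeft()/getRight() follow the pair accessors used in the FlowRestoreRequest branch of SplitterBolt, and the Flow getters are assumed from the setters shown in buildFlowInfoData.

// Hypothetical caller: pull both directions of the flow out of an incoming InfoMessage.
ImmutablePair<Flow, Flow> payload = getFlowPayload(message);
Flow forward = payload.getLeft();
Flow reverse = payload.getRight();
logger.info("Flow {}: {} -> {}", forward.getFlowId(),
        forward.getSourceSwitch(), forward.getDestinationSwitch()); // bean getters assumed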