use of org.openkilda.messaging.info.InfoMessage in project open-kilda by telstra.
the class CrudBolt method handleCacheSyncRequest.
private void handleCacheSyncRequest(CommandMessage message, Tuple tuple) throws IOException {
    logger.info("CACHE SYNC: {}", message);

    // NB: This is going to be a "bulky" operation - get all flows from the DB, and synchronize
    // with the cache.
    List<String> droppedFlows = new ArrayList<>();
    List<String> addedFlows = new ArrayList<>();
    List<String> modifiedFlows = new ArrayList<>();
    List<String> unchangedFlows = new ArrayList<>();

    List<FlowInfo> flowInfos = pathComputer.getFlowInfo();

    // Instead of determining left/right .. store based on flowId & cookie
    HashMap<String, FlowInfo> flowToInfo = new HashMap<>();
    for (FlowInfo fi : flowInfos) {
        flowToInfo.put(fi.getFlowId() + fi.getCookie(), fi);
    }

    // We first look at comparing what is in the DB to what is in the cache
    for (FlowInfo fi : flowInfos) {
        String flowid = fi.getFlowId();
        if (flowCache.cacheContainsFlow(flowid)) {
            // TODO: better, more holistic comparison
            // TODO: if the flow is modified, then just leverage drop / add primitives.
            // TODO: ensure that the DB is always the source of truth - cache and DB ops part of one transaction.
            // Need to compare both sides
            ImmutablePair<Flow, Flow> fc = flowCache.getFlow(flowid);
            int count = modifiedFlows.size();
            if (fi.getCookie() != fc.left.getCookie() && fi.getCookie() != fc.right.getCookie()) {
                modifiedFlows.add("cookie: " + flowid + ":" + fi.getCookie()
                        + ":" + fc.left.getCookie() + ":" + fc.right.getCookie());
            }
            if (fi.getMeterId() != fc.left.getMeterId() && fi.getMeterId() != fc.right.getMeterId()) {
                modifiedFlows.add("meter: " + flowid + ":" + fi.getMeterId()
                        + ":" + fc.left.getMeterId() + ":" + fc.right.getMeterId());
            }
            if (fi.getTransitVlanId() != fc.left.getTransitVlan() && fi.getTransitVlanId() != fc.right.getTransitVlan()) {
                modifiedFlows.add("transit: " + flowid + ":" + fi.getTransitVlanId()
                        + ":" + fc.left.getTransitVlan() + ":" + fc.right.getTransitVlan());
            }
            if (!fi.getSrcSwitchId().equals(fc.left.getSourceSwitch()) && !fi.getSrcSwitchId().equals(fc.right.getSourceSwitch())) {
                modifiedFlows.add("switch: " + flowid + "|" + fi.getSrcSwitchId()
                        + "|" + fc.left.getSourceSwitch() + "|" + fc.right.getSourceSwitch());
            }
            if (count == modifiedFlows.size()) {
                unchangedFlows.add(flowid);
            }
        } else {
            // TODO: need to get the flow from the DB and add it properly
            addedFlows.add(flowid);
        }
    }

    // Now we see if the cache holds flows that are not in the DB
    for (ImmutablePair<Flow, Flow> flow : flowCache.dumpFlows()) {
        String key = flow.left.getFlowId() + flow.left.getCookie();
        // Compare the left .. if it is in, then check the right .. otherwise record it (no need to check the right)
        if (!flowToInfo.containsKey(key)) {
            /* (carmine) - This code is to drop the flow from the cache since it isn't in the DB.
             * But the user can just as easily call delete in the NB API .. which should do the right thing.
             * So, for now, just add the flow id.
             */
            // String removedFlow = flowCache.removeFlow(flow.left.getFlowId()).toString();
            // String asJson = MAPPER.writeValueAsString(removedFlow);
            // droppedFlows.add(asJson);
            droppedFlows.add(flow.left.getFlowId());
        } else {
            key = flow.right.getFlowId() + flow.right.getCookie();
            if (!flowToInfo.containsKey(key)) {
                // (carmine) - same comment as above
                // String removedFlow = flowCache.removeFlow(flow.left.getFlowId()).toString();
                // String asJson = MAPPER.writeValueAsString(removedFlow);
                // droppedFlows.add(asJson);
                droppedFlows.add(flow.right.getFlowId());
            }
        }
    }

    FlowCacheSyncResults results = new FlowCacheSyncResults(
            droppedFlows.toArray(new String[0]), addedFlows.toArray(new String[0]),
            modifiedFlows.toArray(new String[0]), unchangedFlows.toArray(new String[0]));
    Values northbound = new Values(new InfoMessage(new FlowCacheSyncResponse(results),
            message.getTimestamp(), message.getCorrelationId(), Destination.NORTHBOUND));
    outputCollector.emit(StreamType.RESPONSE.toString(), tuple, northbound);
}
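For context, the single-value Values emitted on the RESPONSE stream above has to line up with the stream schema the bolt declares. A minimal sketch of such a declaration, assuming the org.apache.storm API (OutputFieldsDeclarer, Fields) and a placeholder field name "message"; the field constant CrudBolt actually uses is not shown in this snippet:

@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // One field per tuple on the RESPONSE stream: the InfoMessage bound for northbound.
    // "message" is an illustrative field name, not necessarily the project's constant.
    declarer.declareStream(StreamType.RESPONSE.toString(), new Fields("message"));
}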
use of org.openkilda.messaging.info.InfoMessage in project open-kilda by telstra.
the class SplitterBolt method execute.
/**
 * {@inheritDoc}
 */
@Override
public void execute(Tuple tuple) {
    String request = tuple.getString(0);
    Values values = new Values(request);
    try {
        Message message = tryMessage(request);
        if (message == null
                || !Destination.WFM.equals(message.getDestination())
                || !(message instanceof CommandMessage || message instanceof InfoMessage)) {
            return;
        }
        logger.debug("Request tuple={}", tuple);

        /*
         * First, try to see if this is a PUSH / UNPUSH (smaller code base vs other).
         * NB: InfoMessage was used since it has the relevant attributes/properties for
         * pushing the flow.
         */
        if (message instanceof InfoMessage) {
            InfoData data = ((InfoMessage) message).getData();
            if (data instanceof FlowInfoData) {
                FlowInfoData fid = (FlowInfoData) data;
                String flowId = fid.getFlowId();
                values = new Values(message, flowId);
                logger.info("Flow {} message: operation={} values={}", flowId, fid.getOperation(), values);
                if (fid.getOperation() == FlowOperation.PUSH) {
                    outputCollector.emit(StreamType.PUSH.toString(), tuple, values);
                } else if (fid.getOperation() == FlowOperation.UNPUSH) {
                    outputCollector.emit(StreamType.UNPUSH.toString(), tuple, values);
                } else {
                    logger.warn("Skip undefined FlowInfoData operation {}: {}={}",
                            fid.getOperation(), Utils.CORRELATION_ID, message.getCorrelationId());
                }
            } else {
                logger.warn("Skip undefined InfoMessage: {}={}", Utils.CORRELATION_ID, message.getCorrelationId());
            }
            return;
        }

        /*
         * Second, it isn't an InfoMessage, so it must be a CommandMessage.
         */
        CommandData data = ((CommandMessage) message).getData();
        if (data instanceof FlowCreateRequest) {
            String flowId = ((FlowCreateRequest) data).getPayload().getFlowId();
            logger.info("Flow {} create message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.CREATE.toString(), tuple, values);
        } else if (data instanceof FlowDeleteRequest) {
            String flowId = ((FlowDeleteRequest) data).getPayload().getFlowId();
            logger.info("Flow {} delete message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.DELETE.toString(), tuple, values);
        } else if (data instanceof FlowUpdateRequest) {
            String flowId = ((FlowUpdateRequest) data).getPayload().getFlowId();
            logger.info("Flow {} update message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.UPDATE.toString(), tuple, values);
        } else if (data instanceof FlowRestoreRequest) {
            String flowId = ((FlowRestoreRequest) data).getPayload().getLeft().getFlowId();
            logger.info("Flow {} restore message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.RESTORE.toString(), tuple, values);
        } else if (data instanceof FlowRerouteRequest) {
            String flowId = ((FlowRerouteRequest) data).getPayload().getFlowId();
            logger.info("Flow {} reroute message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.REROUTE.toString(), tuple, values);
        } else if (data instanceof FlowStatusRequest) {
            String flowId = ((FlowStatusRequest) data).getPayload().getId();
            logger.info("Flow {} status message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.STATUS.toString(), tuple, values);
        } else if (data instanceof FlowGetRequest) {
            String flowId = ((FlowGetRequest) data).getPayload().getId();
            logger.info("Flow {} get message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.READ.toString(), tuple, values);
        } else if (data instanceof FlowsGetRequest) {
            logger.info("Flows get message: values={}", values);
            values = new Values(message, null);
            outputCollector.emit(StreamType.READ.toString(), tuple, values);
        } else if (data instanceof FlowPathRequest) {
            String flowId = ((FlowPathRequest) data).getPayload().getId();
            logger.info("Flow {} path message: values={}", flowId, values);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.PATH.toString(), tuple, values);
        } else if (data instanceof FlowCacheSyncRequest) {
            logger.info("FlowCacheSyncRequest: values={}", values);
            values = new Values(message, null);
            outputCollector.emit(StreamType.CACHE_SYNC.toString(), tuple, values);
        } else {
            logger.debug("Skip undefined CommandMessage: {}={}", Utils.CORRELATION_ID, message.getCorrelationId());
        }

        /*
         * (crimi) This was commented out since the parsing of the message is handled in tryMessage.
         * Due to refactoring of the kafka topics, it appears more messages are coming to the splitter than
         * originally designed for.
         *
         * TODO: Fix the cause of excess messages coming to the splitter.
         */
        //
        // } catch (IOException exception) {
        //     String message = String.format("Could not deserialize message: %s", request);
        //     logger.error("{}", message, exception);
        //
        //     ErrorMessage errorMessage = new ErrorMessage(
        //             new ErrorData(ErrorType.REQUEST_INVALID, message, exception.getMessage()),
        //             System.currentTimeMillis(), Utils.SYSTEM_CORRELATION_ID, Destination.NORTHBOUND);
        //
        //     values = new Values(errorMessage, ErrorType.INTERNAL_ERROR);
        //     outputCollector.emit(StreamType.ERROR.toString(), tuple, values);
    } finally {
        logger.debug("Splitter message ack: component={}, stream={}, tuple={}, values={}",
                tuple.getSourceComponent(), tuple.getSourceStreamId(), tuple, values);
        outputCollector.ack(tuple);
    }
}
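The tryMessage helper invoked at the top of execute is not part of this snippet. A minimal sketch of what such a parser could look like, assuming it reuses the shared Jackson MAPPER and signals "not a recognizable message" by returning null rather than throwing, which is what the null check in execute implies:

private Message tryMessage(String request) {
    Message result = null;
    try {
        result = MAPPER.readValue(request, Message.class);
    } catch (IOException exception) {
        // Not every record reaching the splitter is a Message; swallow the parse error
        // so execute() can simply skip the tuple after the null check.
        logger.trace("Skipping payload that is not a valid message: {}", request);
    }
    return result;
}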
use of org.openkilda.messaging.info.InfoMessage in project open-kilda by telstra.
the class TopologyEngineBolt method execute.
/**
 * {@inheritDoc}
 */
@Override
public void execute(Tuple tuple) {
    String request = tuple.getString(0);
    Values values = null;
    try {
        Message message = MAPPER.readValue(request, Message.class);
        if (!Destination.WFM.equals(message.getDestination())) {
            return;
        }
        logger.debug("Request tuple={}", tuple);
        if (message instanceof CommandMessage) {
            CommandData data = ((CommandMessage) message).getData();
            if (data instanceof BaseInstallFlow) {
                BaseInstallFlow installData = (BaseInstallFlow) data;
                Long transactionId = UUID.randomUUID().getLeastSignificantBits();
                installData.setTransactionId(transactionId);
                String switchId = installData.getSwitchId();
                String flowId = installData.getId();
                logger.debug("Flow install message: {}={}, switch-id={}, {}={}, {}={}, message={}",
                        Utils.CORRELATION_ID, message.getCorrelationId(), switchId,
                        Utils.FLOW_ID, flowId, Utils.TRANSACTION_ID, transactionId, request);
                // FIXME(surabujin): send here and in TE
                message.setDestination(Destination.CONTROLLER);
                values = new Values(MAPPER.writeValueAsString(message), switchId, flowId, transactionId);
                outputCollector.emit(StreamType.CREATE.toString(), tuple, values);
            } else if (data instanceof RemoveFlow) {
                RemoveFlow removeData = (RemoveFlow) data;
                Long transactionId = UUID.randomUUID().getLeastSignificantBits();
                removeData.setTransactionId(transactionId);
                String switchId = removeData.getSwitchId();
                String flowId = removeData.getId();
                logger.debug("Flow remove message: {}={}, switch-id={}, {}={}, {}={}, message={}",
                        Utils.CORRELATION_ID, message.getCorrelationId(), switchId,
                        Utils.FLOW_ID, flowId, Utils.TRANSACTION_ID, transactionId, request);
                message.setDestination(Destination.CONTROLLER);
                values = new Values(MAPPER.writeValueAsString(message), switchId, flowId, transactionId);
                outputCollector.emit(StreamType.DELETE.toString(), tuple, values);
            } else {
                logger.debug("Skip undefined command message: {}={}, message={}",
                        Utils.CORRELATION_ID, message.getCorrelationId(), request);
            }
        } else if (message instanceof InfoMessage) {
            values = new Values(message);
            logger.debug("Flow response message: {}={}, message={}",
                    Utils.CORRELATION_ID, message.getCorrelationId(), request);
            outputCollector.emit(StreamType.RESPONSE.toString(), tuple, values);
        } else if (message instanceof ErrorMessage) {
            String flowId = ((ErrorMessage) message).getData().getErrorDescription();
            logger.error("Flow error message: {}={}, {}={}, message={}",
                    Utils.CORRELATION_ID, message.getCorrelationId(), Utils.FLOW_ID, flowId, request);
            values = new Values(message, flowId);
            outputCollector.emit(StreamType.STATUS.toString(), tuple, values);
        } else {
            logger.debug("Skip undefined message: {}={}, message={}",
                    Utils.CORRELATION_ID, message.getCorrelationId(), request);
        }
    } catch (IOException exception) {
        logger.error("Could not deserialize message={}", request, exception);
    } finally {
        logger.debug("Topology-Engine message ack: component={}, stream={}, tuple={}, values={}",
                tuple.getSourceComponent(), tuple.getSourceStreamId(), tuple, values);
        outputCollector.ack(tuple);
    }
}
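The install and remove branches above repeat the same steps: stamp a fresh transaction id, retarget the message at the speaker, serialize it, and emit it with routing fields. A hedged refactoring sketch of that shared path, assuming the two payload types share a supertype (written here as BaseFlow) exposing setTransactionId(), getSwitchId() and getId(); the snippet itself does not confirm such a type, so treat this purely as an illustration:

private void forwardToSpeaker(String streamId, Message message, BaseFlow flowData, Tuple tuple)
        throws JsonProcessingException {
    Long transactionId = UUID.randomUUID().getLeastSignificantBits();
    flowData.setTransactionId(transactionId);
    // Retarget the command at the controller/speaker before re-serializing it.
    message.setDestination(Destination.CONTROLLER);
    Values values = new Values(MAPPER.writeValueAsString(message),
            flowData.getSwitchId(), flowData.getId(), transactionId);
    outputCollector.emit(streamId, tuple, values);
}

Since the real bolt also reuses the emitted Values in its finally-block log line, a version of this helper would likely return the Values instead of discarding it.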
use of org.openkilda.messaging.info.InfoMessage in project open-kilda by telstra.
the class MeterConfigMetricGenBolt method execute.
@Override
public void execute(Tuple input) {
    StatsComponentType componentId = StatsComponentType.valueOf(input.getSourceComponent());
    InfoMessage message = (InfoMessage) input.getValueByField(MESSAGE_FIELD);
    if (!Destination.WFM_STATS.equals(message.getDestination())) {
        collector.ack(input);
        return;
    }
    LOGGER.debug("Meter config stats message: {}={}, component={}, stream={}",
            CORRELATION_ID, message.getCorrelationId(), componentId,
            StatsStreamType.valueOf(input.getSourceStreamId()));
    MeterConfigStatsData data = (MeterConfigStatsData) message.getData();
    long timestamp = message.getTimestamp();
    try {
        String switchId = data.getSwitchId().replaceAll(":", "");
        for (MeterConfigReply reply : data.getStats()) {
            for (Long meterId : reply.getMeterIds()) {
                emit(timestamp, meterId, switchId);
            }
        }
    } finally {
        collector.ack(input);
    }
}
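The emit(timestamp, meterId, switchId) helper is not shown in this snippet. A rough sketch of the kind of thing a metric-gen bolt typically does with it, assuming a downstream time-series writer that accepts (metric, timestamp, value, tags) tuples; the metric name and tag keys below are placeholders rather than the project's actual ones:

private void emit(long timestamp, Long meterId, String switchId) {
    Map<String, String> tags = new HashMap<>();
    tags.put("switchid", switchId);          // placeholder tag key
    tags.put("meterid", meterId.toString()); // placeholder tag key
    // "pen.switch.meters" is an illustrative metric name only.
    collector.emit(new Values("pen.switch.meters", timestamp, 1L, tags));
}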
use of org.openkilda.messaging.info.InfoMessage in project open-kilda by telstra.
the class PortMetricGenBolt method execute.
@Override
public void execute(Tuple input) {
    StatsComponentType componentId = StatsComponentType.valueOf(input.getSourceComponent());
    InfoMessage message = (InfoMessage) input.getValueByField(MESSAGE_FIELD);
    if (!Destination.WFM_STATS.equals(message.getDestination())) {
        collector.ack(input);
        return;
    }
    LOGGER.debug("Port stats message: {}={}, component={}, stream={}",
            CORRELATION_ID, message.getCorrelationId(), componentId,
            StatsStreamType.valueOf(input.getSourceStreamId()));
    PortStatsData data = (PortStatsData) message.getData();
    long timestamp = message.getTimestamp();
    try {
        String switchId = switchNameCache.get(data.getSwitchId());
        if (switchId == null) {
            switchId = "SW" + data.getSwitchId().replaceAll(":", "").toUpperCase();
            switchNameCache.put(data.getSwitchId(), switchId);
        }
        for (PortStatsReply reply : data.getStats()) {
            for (PortStatsEntry entry : reply.getEntries()) {
                emit(entry, timestamp, switchId);
            }
        }
    } finally {
        collector.ack(input);
    }
}
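One detail worth calling out: when the switch id is not cached yet, the bolt derives a tag-friendly name by stripping the colons from the datapath id and prefixing it with "SW". A quick standalone illustration of that mapping, using a sample datapath id:

String dpid = "00:00:00:00:00:00:00:01";
String tagValue = "SW" + dpid.replaceAll(":", "").toUpperCase();
// tagValue is "SW0000000000000001"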