Use of org.onosproject.net.flow.TrafficSelector in project onos by opennetworkinglab.
From the class OvsOfdpaGroupHandler, method processEcmpHashedNextObjective.
/**
 * In OFDPA2 we do not support MPLS-ECMP, while we do in the
 * Open vSwitch implementation.
 *
 * @param nextObjective the hashed next objective to support.
 */
@Override
protected void processEcmpHashedNextObjective(NextObjective nextObjective) {
    // The MPLS-ECMP case. For now, we try to create an MPLS-ECMP group for the
    // transport of a VPWS. The necessary info is contained in the meta selector;
    // in particular, we are looking for the case of BoS == false.
    TrafficSelector metaSelector = nextObjective.meta();
    if (metaSelector != null && OfdpaPipelineUtility.isNotMplsBos(metaSelector)) {
        // storage for all group keys in the chain of groups created
        List<Deque<GroupKey>> allGroupKeys = new ArrayList<>();
        List<GroupInfo> unsentGroups = new ArrayList<>();
        createEcmpHashBucketChains(nextObjective, allGroupKeys, unsentGroups);
        // now we can create the outermost MPLS ECMP group
        List<GroupBucket> mplsEcmpGroupBuckets = new ArrayList<>();
        for (GroupInfo gi : unsentGroups) {
            // create an ECMP bucket that points to the outer group
            TrafficTreatment.Builder ttb = DefaultTrafficTreatment.builder();
            ttb.group(new GroupId(gi.nextGroupDesc().givenGroupId()));
            GroupBucket sbucket = DefaultGroupBucket.createSelectGroupBucket(ttb.build());
            mplsEcmpGroupBuckets.add(sbucket);
        }
        int mplsEcmpIndex = getNextAvailableIndex();
        int mplsEcmpGroupId = makeMplsForwardingGroupId(OfdpaMplsGroupSubType.MPLS_ECMP, mplsEcmpIndex);
        GroupKey mplsEmpGroupKey = new DefaultGroupKey(Ofdpa2Pipeline.appKryo.serialize(mplsEcmpIndex));
        GroupDescription mplsEcmpGroupDesc = new DefaultGroupDescription(deviceId,
                GroupDescription.Type.SELECT,
                new GroupBuckets(mplsEcmpGroupBuckets),
                mplsEmpGroupKey,
                mplsEcmpGroupId,
                nextObjective.appId());
        GroupChainElem mplsEcmpGce = new GroupChainElem(mplsEcmpGroupDesc,
                mplsEcmpGroupBuckets.size(), false, deviceId);
        // create objects for local and distributed storage
        allGroupKeys.forEach(gkeyChain -> gkeyChain.addFirst(mplsEmpGroupKey));
        OfdpaNextGroup ofdpaGrp = new OfdpaNextGroup(allGroupKeys, nextObjective);
        // store mplsEcmpGroupKey with the ofdpaGroupChain for the nextObjective
        // that depends on it
        updatePendingNextObjective(mplsEmpGroupKey, ofdpaGrp);
        log.debug("Trying MPLS-ECMP: device:{} gid:{} gkey:{} nextId:{}",
                deviceId, Integer.toHexString(mplsEcmpGroupId), mplsEmpGroupKey, nextObjective.id());
        // finally we are ready to send the innermost groups
        for (GroupInfo gi : unsentGroups) {
            log.debug("Sending innermost group {} in group chain on device {} ",
                    Integer.toHexString(gi.innerMostGroupDesc().givenGroupId()), deviceId);
            updatePendingGroups(gi.nextGroupDesc().appCookie(), mplsEcmpGce);
            groupService.addGroup(gi.innerMostGroupDesc());
        }
        return;
    }
    super.processEcmpHashedNextObjective(nextObjective);
}
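For context, a minimal sketch of the kind of meta TrafficSelector that drives the objective into the MPLS-ECMP branch above: an MPLS match with BoS == false. The label value, nextId, appId and mplsPathTreatment are placeholders, not taken from the ONOS sources.

    // Hedged sketch: a non-BoS MPLS meta selector steers processEcmpHashedNextObjective
    // into the MPLS-ECMP branch. Label, nextId, appId and treatments are placeholders.
    TrafficSelector meta = DefaultTrafficSelector.builder()
            .matchEthType(Ethernet.MPLS_UNICAST)
            .matchMplsLabel(MplsLabel.mplsLabel(100))   // hypothetical transport label
            .matchMplsBos(false)                        // BoS == false selects the VPWS transport case
            .build();
    NextObjective hashedNext = DefaultNextObjective.builder()
            .withId(nextId)                             // assumed to come from an allocated next id
            .withType(NextObjective.Type.HASHED)
            .withMeta(meta)
            .fromApp(appId)
            .addTreatment(mplsPathTreatment)            // one treatment per ECMP path
            .add();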
Use of org.onosproject.net.flow.TrafficSelector in project onos by opennetworkinglab.
From the class OvsOfdpaPipeline, method processEthDstSpecific.
@Override
protected Collection<FlowRule> processEthDstSpecific(ForwardingObjective fwd) {
    List<FlowRule> rules = new ArrayList<>();
    // Build filtered selector
    TrafficSelector selector = fwd.selector();
    EthCriterion ethCriterion = (EthCriterion) selector.getCriterion(Criterion.Type.ETH_DST);
    VlanIdCriterion vlanIdCriterion = (VlanIdCriterion) selector.getCriterion(VLAN_VID);
    if (vlanIdCriterion == null) {
        log.warn("Forwarding objective for bridging requires vlan. Not "
                + "installing fwd:{} in dev:{}", fwd.id(), deviceId);
        fail(fwd, ObjectiveError.BADPARAMS);
        return Collections.emptySet();
    }
    TrafficSelector.Builder filteredSelectorBuilder = DefaultTrafficSelector.builder();
    // Do not match MacAddress for subnet broadcast entry
    if (!ethCriterion.mac().equals(NONE) && !ethCriterion.mac().equals(BROADCAST)) {
        filteredSelectorBuilder.matchEthDst(ethCriterion.mac());
        log.debug("processing L2 forwarding objective:{} -> next:{} in dev:{}",
                fwd.id(), fwd.nextId(), deviceId);
    } else {
        log.debug("processing L2 Broadcast forwarding objective:{} -> next:{} "
                + "in dev:{} for vlan:{}", fwd.id(), fwd.nextId(), deviceId, vlanIdCriterion.vlanId());
    }
    filteredSelectorBuilder.matchVlanId(vlanIdCriterion.vlanId());
    TrafficSelector filteredSelector = filteredSelectorBuilder.build();
    if (fwd.treatment() != null) {
        log.warn("Ignoring traffic treatment in fwd rule {} meant for L2 table "
                + "for dev:{}. Expecting only nextId", fwd.id(), deviceId);
    }
    TrafficTreatment.Builder treatmentBuilder = DefaultTrafficTreatment.builder();
    if (fwd.nextId() != null) {
        NextGroup next = getGroupForNextObjective(fwd.nextId());
        if (next != null) {
            List<Deque<GroupKey>> gkeys = appKryo.deserialize(next.data());
            // we only need the top level group's key to point the flow to it
            Group group = groupService.getGroup(deviceId, gkeys.get(0).peekFirst());
            if (group != null) {
                treatmentBuilder.deferred().group(group.id());
            } else {
                log.warn("Group with key:{} for next-id:{} not found in dev:{}",
                        gkeys.get(0).peekFirst(), fwd.nextId(), deviceId);
                fail(fwd, ObjectiveError.GROUPMISSING);
                return Collections.emptySet();
            }
        }
    }
    treatmentBuilder.immediate().transition(ACL_TABLE);
    TrafficTreatment filteredTreatment = treatmentBuilder.build();
    // Build bridging table entries
    FlowRule.Builder flowRuleBuilder = DefaultFlowRule.builder();
    flowRuleBuilder.fromApp(fwd.appId())
            .withPriority(fwd.priority())
            .forDevice(deviceId)
            .withSelector(filteredSelector)
            .withTreatment(filteredTreatment)
            .forTable(BRIDGING_TABLE);
    if (fwd.permanent()) {
        flowRuleBuilder.makePermanent();
    } else {
        flowRuleBuilder.makeTemporary(fwd.timeout());
    }
    rules.add(flowRuleBuilder.build());
    return rules;
}
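A hedged sketch of the kind of bridging ForwardingObjective this method consumes: the selector carries ETH_DST and VLAN_VID, and the objective only points at a nextId (no treatment). The MAC, VLAN, priority, nextId and appId below are placeholders, not values from the ONOS sources.

    TrafficSelector bridgingSelector = DefaultTrafficSelector.builder()
            .matchEthDst(MacAddress.valueOf("00:00:00:00:00:01"))  // hypothetical host MAC
            .matchVlanId(VlanId.vlanId((short) 10))                // hypothetical VLAN
            .build();
    ForwardingObjective bridgingFwd = DefaultForwardingObjective.builder()
            .withSelector(bridgingSelector)
            .nextStep(nextId)                                      // treatment is intentionally omitted
            .withPriority(100)
            .withFlag(ForwardingObjective.Flag.SPECIFIC)
            .makePermanent()
            .fromApp(appId)
            .add();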
Use of org.onosproject.net.flow.TrafficSelector in project onos by opennetworkinglab.
From the class OvsOfdpaPipeline, method processEthTypeSpecific.
/*
 * The Open vSwitch emulation allows MPLS ECMP.
 *
 * (non-Javadoc)
 * @see org.onosproject.driver.pipeline.OFDPA2Pipeline#processEthTypeSpecific
 */
@Override
protected Collection<FlowRule> processEthTypeSpecific(ForwardingObjective fwd) {
    if (isDoubleTagged(fwd)) {
        return processDoubleTaggedFwd(fwd);
    }
    TrafficSelector selector = fwd.selector();
    EthTypeCriterion ethType = (EthTypeCriterion) selector.getCriterion(Criterion.Type.ETH_TYPE);
    if ((ethType == null)
            || ((ethType.ethType().toShort() != Ethernet.TYPE_IPV4)
                    && (ethType.ethType().toShort() != Ethernet.MPLS_UNICAST)
                    && (ethType.ethType().toShort() != Ethernet.TYPE_IPV6))) {
        log.warn("processSpecific: Unsupported forwarding objective criteria "
                + "ethType:{} in dev:{}", ethType, deviceId);
        fail(fwd, ObjectiveError.UNSUPPORTED);
        return Collections.emptySet();
    }
    boolean defaultRule = false;
    int forTableId;
    TrafficSelector.Builder filteredSelector = DefaultTrafficSelector.builder();
    TrafficSelector.Builder complementarySelector = DefaultTrafficSelector.builder();
    if (ethType.ethType().toShort() == Ethernet.TYPE_IPV4) {
        IpPrefix ipv4Dst = ((IPCriterion) selector.getCriterion(Criterion.Type.IPV4_DST)).ip();
        if (ipv4Dst.isMulticast()) {
            if (ipv4Dst.prefixLength() != 32) {
                log.warn("Multicast specific forwarding objective can only be /32");
                fail(fwd, ObjectiveError.BADPARAMS);
                return ImmutableSet.of();
            }
            VlanId assignedVlan = readVlanFromSelector(fwd.meta());
            if (assignedVlan == null) {
                log.warn("VLAN ID required by multicast specific fwd obj is missing. Abort.");
                fail(fwd, ObjectiveError.BADPARAMS);
                return ImmutableSet.of();
            }
            filteredSelector.matchVlanId(assignedVlan);
            filteredSelector.matchEthType(Ethernet.TYPE_IPV4).matchIPDst(ipv4Dst);
            forTableId = MULTICAST_ROUTING_TABLE;
            log.debug("processing IPv4 multicast specific forwarding objective {} -> next:{}"
                    + " in dev:{}", fwd.id(), fwd.nextId(), deviceId);
        } else {
            if (ipv4Dst.prefixLength() == 0) {
                // The entire IPV4_DST field is wildcarded intentionally
                filteredSelector.matchEthType(Ethernet.TYPE_IPV4);
            } else {
                filteredSelector.matchEthType(Ethernet.TYPE_IPV4).matchIPDst(ipv4Dst);
            }
            forTableId = UNICAST_ROUTING_TABLE;
            log.debug("processing IPv4 unicast specific forwarding objective {} -> next:{}"
                    + " in dev:{}", fwd.id(), fwd.nextId(), deviceId);
        }
    } else if (ethType.ethType().toShort() == Ethernet.TYPE_IPV6) {
        IpPrefix ipv6Dst = ((IPCriterion) selector.getCriterion(Criterion.Type.IPV6_DST)).ip();
        if (ipv6Dst.isMulticast()) {
            if (ipv6Dst.prefixLength() != IpAddress.INET6_BIT_LENGTH) {
                log.debug("Multicast specific IPv6 forwarding objective can only be /128");
                fail(fwd, ObjectiveError.BADPARAMS);
                return ImmutableSet.of();
            }
            VlanId assignedVlan = readVlanFromSelector(fwd.meta());
            if (assignedVlan == null) {
                log.debug("VLAN ID required by multicast specific fwd obj is missing. Abort.");
                fail(fwd, ObjectiveError.BADPARAMS);
                return ImmutableSet.of();
            }
            filteredSelector.matchVlanId(assignedVlan);
            filteredSelector.matchEthType(Ethernet.TYPE_IPV6).matchIPv6Dst(ipv6Dst);
            forTableId = MULTICAST_ROUTING_TABLE;
            log.debug("processing IPv6 multicast specific forwarding objective {} -> next:{}"
                    + " in dev:{}", fwd.id(), fwd.nextId(), deviceId);
        } else {
            if (buildIpv6Selector(filteredSelector, fwd) < 0) {
                return Collections.emptyList();
            }
            forTableId = UNICAST_ROUTING_TABLE;
        }
    } else {
        filteredSelector.matchEthType(Ethernet.MPLS_UNICAST)
                .matchMplsLabel(((MplsCriterion) selector.getCriterion(Criterion.Type.MPLS_LABEL)).label());
        MplsBosCriterion bos = (MplsBosCriterion) selector.getCriterion(Criterion.Type.MPLS_BOS);
        if (bos != null) {
            filteredSelector.matchMplsBos(bos.mplsBos());
        }
        forTableId = MPLS_TABLE_1;
        log.debug("processing MPLS specific forwarding objective {} -> next:{}"
                + " in dev {}", fwd.id(), fwd.nextId(), deviceId);
    }
    TrafficTreatment.Builder tb = DefaultTrafficTreatment.builder();
    if (fwd.treatment() != null) {
        for (Instruction i : fwd.treatment().allInstructions()) {
            if (i instanceof L3ModificationInstruction) {
                L3ModificationInstruction l3instr = (L3ModificationInstruction) i;
                if (l3instr.subtype().equals(L3ModificationInstruction.L3SubType.TTL_IN)
                        || l3instr.subtype().equals(L3ModificationInstruction.L3SubType.TTL_OUT)) {
                    continue;
                }
            }
            /*
             * NOTE: OF-DPA does not support immediate instructions in the
             * L3 unicast and MPLS tables.
             */
            tb.deferred().add(i);
        }
    }
    if (fwd.nextId() != null) {
        NextGroup next = getGroupForNextObjective(fwd.nextId());
        if (next != null) {
            List<Deque<GroupKey>> gkeys = appKryo.deserialize(next.data());
            // we only need the top level group's key to point the flow to it
            Group group = groupService.getGroup(deviceId, gkeys.get(0).peekFirst());
            if (group == null) {
                log.warn("Group with key:{} for next-id:{} not found in dev:{}",
                        gkeys.get(0).peekFirst(), fwd.nextId(), deviceId);
                fail(fwd, ObjectiveError.GROUPMISSING);
                return Collections.emptySet();
            }
            tb.deferred().group(group.id());
        }
    }
    tb.transition(ACL_TABLE);
    FlowRule.Builder ruleBuilder = DefaultFlowRule.builder()
            .fromApp(fwd.appId())
            .withPriority(fwd.priority())
            .forDevice(deviceId)
            .withSelector(filteredSelector.build())
            .withTreatment(tb.build())
            .forTable(forTableId);
    if (fwd.permanent()) {
        ruleBuilder.makePermanent();
    } else {
        ruleBuilder.makeTemporary(fwd.timeout());
    }
    Collection<FlowRule> flowRuleCollection = new ArrayList<>();
    flowRuleCollection.add(ruleBuilder.build());
    if (defaultRule) {
        flowRuleCollection.add(defaultRoute(fwd, complementarySelector, forTableId, tb));
        log.debug("Default rule 0.0.0.0/0 is being installed as two rules");
    }
    return flowRuleCollection;
}
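As a companion, a hedged sketch of an IPv4 unicast ForwardingObjective that would take the unicast-routing branch above (ETH_TYPE == IPv4, non-multicast destination prefix). The prefix, nextId, priority and appId are placeholders.

    TrafficSelector routeSelector = DefaultTrafficSelector.builder()
            .matchEthType(Ethernet.TYPE_IPV4)
            .matchIPDst(IpPrefix.valueOf("10.0.1.0/24"))   // hypothetical unicast prefix
            .build();
    ForwardingObjective routeFwd = DefaultForwardingObjective.builder()
            .withSelector(routeSelector)
            .nextStep(nextId)                              // points to an existing L3 group chain
            .withPriority(110)
            .withFlag(ForwardingObjective.Flag.SPECIFIC)
            .makePermanent()
            .fromApp(appId)
            .add();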
Use of org.onosproject.net.flow.TrafficSelector in project onos by opennetworkinglab.
From the class OfdpaPipelineTraceable, method apply.
@Override
public PipelineTraceableOutput apply(PipelineTraceableInput input) {
    PipelineTraceableOutput.Builder outputBuilder = PipelineTraceableOutput.builder();
    log.debug("Current packet {} - applying flow tables", input.ingressPacket());
    List<FlowEntry> outputFlows = new ArrayList<>();
    List<Instruction> deferredInstructions = new ArrayList<>();
    PipelineTraceableHitChain currentHitChain = PipelineTraceableHitChain.emptyHitChain();
    TrafficSelector currentPacket = DefaultTrafficSelector.builder(input.ingressPacket().packet()).build();
    // Init step - find out the first table
    int initialTableId = -1;
    FlowEntry nextTableIdEntry = findNextTableIdEntry(initialTableId, input.flows());
    if (nextTableIdEntry == null) {
        currentHitChain.setEgressPacket(new PipelineTraceablePacket(currentPacket));
        currentHitChain.dropped();
        return outputBuilder.appendToLog("No flow rules for device " + deviceId + ". Aborting")
                .noFlows()
                .addHitChain(currentHitChain)
                .build();
    }
    // Iterates over the flow tables until the end of the pipeline
    TableId tableId = nextTableIdEntry.table();
    FlowEntry flowEntry;
    boolean lastTable = false;
    while (!lastTable) {
        log.debug("Searching a Flow Entry on table {} for packet {}", tableId, currentPacket);
        // Gets the rule that matches the incoming packet
        flowEntry = matchHighestPriority(currentPacket, tableId, input.flows());
        log.debug("Found Flow Entry {}", flowEntry);
        // If the flow entry on a table is null and we are on hardware,
        // we treat it as a table miss, with a few exceptions
        if (flowEntry == null && isHardwareSwitch()) {
            log.debug("Ofdpa Hw setup, no flow rule means table miss");
            if (((IndexTableId) tableId).id() == MPLS_L3_TYPE_TABLE) {
                // Apparently a miss, but Table 27 on OFDPA is a fixed table
                currentPacket = handleOfdpa27FixedTable(input.ingressPacket().packet(), currentPacket);
                // The nextTable should be ACL
                tableId = IndexTableId.of(ACL_TABLE - 1);
            }
            // Finding the next table to go to in case of a miss
            nextTableIdEntry = findNextTableIdEntry(((IndexTableId) tableId).id(), input.flows());
            log.debug("Next table id entry {}", nextTableIdEntry);
            // (another possibility is max tableId)
            if (nextTableIdEntry == null && currentHitChain.hitChain().size() == 0) {
                currentHitChain.setEgressPacket(new PipelineTraceablePacket(currentPacket));
                currentHitChain.dropped();
                return outputBuilder.appendToLog("No flow rules for device " + deviceId + ". Aborting")
                        .noFlows()
                        .addHitChain(currentHitChain)
                        .build();
            } else if (nextTableIdEntry == null) {
                // Means that no more flow rules are present
                lastTable = true;
            } else if (((IndexTableId) tableId).id() == TMAC_TABLE) {
                // If the table is 20, OFDPA skips to table 50
                log.debug("A miss on Table 20 on OFDPA means that we skip directly to table 50");
                tableId = IndexTableId.of(BRIDGING_TABLE);
            } else if (((IndexTableId) tableId).id() == MULTICAST_ROUTING_TABLE) {
                // If the table is 40, OFDPA skips to table 60
                log.debug("A miss on Table 40 on OFDPA means that we skip directly to table 60");
                tableId = IndexTableId.of(ACL_TABLE);
            } else {
                tableId = nextTableIdEntry.table();
            }
        } else if (flowEntry == null) {
            currentHitChain.setEgressPacket(new PipelineTraceablePacket(currentPacket));
            currentHitChain.dropped();
            return outputBuilder.appendToLog("Packet has no match on table " + tableId
                    + " in device " + deviceId + ". Dropping")
                    .noFlows()
                    .addHitChain(currentHitChain)
                    .build();
        } else {
            // If the table has a transition
            if (flowEntry.treatment().tableTransition() != null) {
                // Updates the next table we transition to
                tableId = IndexTableId.of(flowEntry.treatment().tableTransition().tableId());
                log.debug("Flow Entry has transition to table Id {}", tableId);
                currentHitChain.addDataPlaneEntity(new DataPlaneEntity(flowEntry));
            } else {
                // The table has no transition, so this is an output rule if we are on the last table
                log.debug("Flow Entry has no transition to table, treating as last rule {}", flowEntry);
                currentHitChain.addDataPlaneEntity(new DataPlaneEntity(flowEntry));
                outputFlows.add(flowEntry);
                lastTable = true;
            }
            // Updates the packet according to the immediate actions of this flow rule.
            currentPacket = updatePacket(currentPacket, flowEntry.treatment().immediate()).build();
            // Saves the deferred rules for later, maintaining the order
            deferredInstructions.addAll(flowEntry.treatment().deferred());
            // If the flow requires clearing deferred actions, we do so for all the ones we encountered.
            if (flowEntry.treatment().clearedDeferred()) {
                deferredInstructions.clear();
            }
            // On table 10 OFDPA needs two rules: one to apply the vlan if none, and one to transition to the next table.
            if (shouldMatchSecondVlanFlow(flowEntry)) {
                // Let's get the packet vlanId criterion
                VlanIdCriterion packetVlanIdCriterion =
                        (VlanIdCriterion) currentPacket.getCriterion(Criterion.Type.VLAN_VID);
                // Let's get the flow entry vlan mod instruction
                ModVlanIdInstruction entryModVlanIdInstruction = (ModVlanIdInstruction) flowEntry.treatment()
                        .immediate().stream()
                        .filter(instruction -> instruction instanceof ModVlanIdInstruction)
                        .findFirst()
                        .orElse(null);
                // There should be a flow rule that matches on the same criteria with the updated vlanId
                if (entryModVlanIdInstruction != null) {
                    FlowEntry secondVlanFlow = getSecondFlowEntryOnTable10(currentPacket,
                            packetVlanIdCriterion, entryModVlanIdInstruction, input.flows());
                    // We found the flow that we expected
                    if (secondVlanFlow != null) {
                        currentHitChain.addDataPlaneEntity(new DataPlaneEntity(secondVlanFlow));
                    } else {
                        currentHitChain.setEgressPacket(new PipelineTraceablePacket(currentPacket));
                        currentHitChain.dropped();
                        return outputBuilder.appendToLog("Missing forwarding rule for tagged"
                                + " packet on " + deviceId)
                                .noFlows()
                                .addHitChain(currentHitChain)
                                .build();
                    }
                }
            }
        }
    }
    // Creating a modifiable builder for the egress packet
    TrafficSelector.Builder egressPacket = DefaultTrafficSelector.builder(currentPacket);
    log.debug("Current packet {} - applying output flows", currentPacket);
    // Handling output flows, which basically means handling output to the controller.
    // OVS and OFDPA both have immediate -> OUTPUT:CONTROLLER. Theoretically there is no
    // need to reflect the updates performed on the packets and on the chain.
    List<PortNumber> outputPorts = new ArrayList<>();
    handleOutputFlows(currentPacket, outputFlows, egressPacket, outputPorts,
            currentHitChain, outputBuilder, input.ingressPacket().packet());
    // Immediate instructions
    log.debug("Current packet {} - applying immediate instructions", currentPacket);
    // Handling immediate instructions, which basically means handling output to the controller.
    // OVS has immediate -> group -> OUTPUT:CONTROLLER.
    List<DataPlaneEntity> entries = ImmutableList.copyOf(currentHitChain.hitChain());
    // Go to the next step - using a copy of the egress packet and of the hit chain
    PipelineTraceableHitChain newHitChain = PipelineTraceableHitChain.emptyHitChain();
    currentHitChain.hitChain().forEach(newHitChain::addDataPlaneEntity);
    TrafficSelector.Builder newEgressPacket = DefaultTrafficSelector.builder(egressPacket.build());
    for (DataPlaneEntity entry : entries) {
        flowEntry = entry.getFlowEntry();
        if (flowEntry != null) {
            getGroupsFromInstructions(input.groups(), flowEntry.treatment().immediate(),
                    newEgressPacket, outputPorts, newHitChain, outputBuilder, input, false);
        }
    }
    // Deferred instructions
    log.debug("Current packet {} - applying deferred instructions", egressPacket.build());
    // Handling deferred instructions, applied to the egress packet and to the hit chain. This is the last step.
    if (deferredInstructions.size() > 0) {
        handleDeferredActions(egressPacket.build(), input.groups(), deferredInstructions,
                outputPorts, currentHitChain, outputBuilder, input);
    }
    // Let's store the partial hit chain and set a message
    if (outputPorts.isEmpty()) {
        currentHitChain.setEgressPacket(new PipelineTraceablePacket(egressPacket.build()));
        currentHitChain.dropped();
        outputBuilder.appendToLog("Packet has no output in device " + deviceId + ". Dropping")
                .dropped()
                .addHitChain(currentHitChain);
    }
    // Done!
    return outputBuilder.build();
}
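For reference, a hedged sketch of how a traced packet can be expressed as a TrafficSelector before being wrapped in a PipelineTraceablePacket, mirroring what apply() does with input.ingressPacket().packet(). The port, addresses, VLAN and destination prefix are placeholders.

    TrafficSelector tracedPacket = DefaultTrafficSelector.builder()
            .matchInPort(PortNumber.portNumber(1))                    // hypothetical ingress port
            .matchEthSrc(MacAddress.valueOf("00:00:00:00:00:01"))
            .matchEthDst(MacAddress.valueOf("00:00:00:00:00:02"))
            .matchVlanId(VlanId.vlanId((short) 10))
            .matchEthType(Ethernet.TYPE_IPV4)
            .matchIPDst(IpPrefix.valueOf("10.0.2.1/32"))
            .build();
    PipelineTraceablePacket ingress = new PipelineTraceablePacket(tracedPacket);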
Use of org.onosproject.net.flow.TrafficSelector in project onos by opennetworkinglab.
From the class OfdpaPipelineTraceable, method handleVlanToController.
// If the initial packet comes in tagged with a VLAN, we output it with that VLAN to ONOS.
// If ONOS applied a VLAN, we remove it.
// TODO Candidate for an AbstractBehavior implementation
private void handleVlanToController(PipelineTraceableHitChain currentHitChain, TrafficSelector initialPacket) {
    VlanIdCriterion initialVid = (VlanIdCriterion) initialPacket.getCriterion(Criterion.Type.VLAN_VID);
    VlanIdCriterion finalVid = (VlanIdCriterion) currentHitChain.egressPacket().packet()
            .getCriterion(Criterion.Type.VLAN_VID);
    if (initialVid != null && !initialVid.equals(finalVid) && initialVid.vlanId().equals(VlanId.NONE)) {
        Set<Criterion> finalCriteria = new HashSet<>(currentHitChain.egressPacket().packet().criteria());
        // removing the final vlanId
        finalCriteria.remove(finalVid);
        TrafficSelector.Builder packetUpdated = DefaultTrafficSelector.builder();
        finalCriteria.forEach(packetUpdated::add);
        // The initial VLAN was NONE, so we set it back to that
        packetUpdated.add(Criteria.matchVlanId(VlanId.NONE));
        // Update the final packet
        currentHitChain.setEgressPacket(new PipelineTraceablePacket(packetUpdated.build()));
    }
}
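The criteria-copy pattern used above (rebuild a selector minus one criterion, then re-add a replacement) generalizes to any criterion type; a minimal sketch, where egressSelector stands in for any existing TrafficSelector:

    // Rebuild egressSelector without its VLAN criterion and re-tag it as untagged.
    Set<Criterion> criteria = new HashSet<>(egressSelector.criteria());
    criteria.removeIf(c -> c.type() == Criterion.Type.VLAN_VID);
    TrafficSelector.Builder rebuilt = DefaultTrafficSelector.builder();
    criteria.forEach(rebuilt::add);
    rebuilt.add(Criteria.matchVlanId(VlanId.NONE));
    TrafficSelector untagged = rebuilt.build();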