Usage of org.batfish.datamodel.Configuration in the batfish project — class Batfish, method reducedReachability:
// Differential reachability: finds flows that are accepted in the base snapshot but NOT
// accepted in the delta snapshot (i.e., reachability "reduced" by the change).
@Override
public AnswerElement reducedReachability(ReachabilitySettings reachabilitySettings) {
Settings settings = getSettings();
checkDifferentialDataPlaneQuestionDependencies();
String tag = getDifferentialFlowTag();
// load base configurations and generate base data plane
pushBaseEnvironment();
Map<String, Configuration> baseConfigurations = loadConfigurations();
Synthesizer baseDataPlaneSynthesizer = synthesizeDataPlane();
popEnvironment();
// load diff configurations and generate diff data plane
pushDeltaEnvironment();
Map<String, Configuration> diffConfigurations = loadConfigurations();
Synthesizer diffDataPlaneSynthesizer = synthesizeDataPlane();
popEnvironment();
// Only originate traffic from ingress nodes that are active in BOTH snapshots, so every
// per-node job is solvable against both data planes.
Set<String> ingressNodes;
try {
ingressNodes = ImmutableSet.copyOf(Sets.intersection(reachabilitySettings.computeActiveIngressNodes(baseConfigurations), reachabilitySettings.computeActiveIngressNodes(diffConfigurations)));
} catch (InvalidReachabilitySettingsException e) {
return e.getInvalidSettingsAnswer();
}
// Blacklists are read from the DELTA environment: they describe what was taken down/changed.
pushDeltaEnvironment();
SortedSet<String> blacklistNodes = getNodeBlacklist();
Set<NodeInterfacePair> blacklistInterfaces = getInterfaceBlacklist();
SortedSet<Edge> blacklistEdges = getEdgeBlacklist();
popEnvironment();
// Excludes flows destined to blacklisted elements (evaluated against the base snapshot).
BlacklistDstIpQuerySynthesizer blacklistQuery = new BlacklistDstIpQuerySynthesizer(null, blacklistNodes, blacklistInterfaces, blacklistEdges, baseConfigurations);
// compute composite program and flows
// NOTE: order matters — synthesizers[i] is paired with queries[i] below:
// [0] base plane / acceptQuery, [1] diff plane / notAcceptQuery, [2] base plane / blacklistQuery.
List<Synthesizer> synthesizers = ImmutableList.of(baseDataPlaneSynthesizer, diffDataPlaneSynthesizer, baseDataPlaneSynthesizer);
// generate base reachability and diff blackhole and blacklist queries
// One CompositeNodJob per (ingress node, vrf) pair; vrfs are taken from the BASE configs.
List<CompositeNodJob> jobs = ingressNodes.stream().flatMap(node -> baseConfigurations.get(node).getVrfs().keySet().stream().map(vrf -> {
Map<String, Set<String>> ingressNodeVrfs = ImmutableMap.of(node, ImmutableSet.of(vrf));
// acceptQuery: flow is accepted in the base snapshot, constrained by the question's headerspace.
StandardReachabilityQuerySynthesizer acceptQuery = StandardReachabilityQuerySynthesizer.builder().setActions(ImmutableSet.of(ForwardingAction.ACCEPT, ForwardingAction.NEIGHBOR_UNREACHABLE_OR_EXITS_NETWORK)).setHeaderSpace(reachabilitySettings.getHeaderSpace()).setIngressNodeVrfs(ingressNodeVrfs).setFinalNodes(ImmutableSet.of()).setTransitNodes(ImmutableSet.of()).setNonTransitNodes(ImmutableSet.of()).setSrcNatted(reachabilitySettings.getSrcNatted()).build();
// notAcceptQuery: same accept condition but NEGATED (below) and with an unconstrained
// headerspace — run against the diff plane it means "NOT accepted in the delta snapshot".
StandardReachabilityQuerySynthesizer notAcceptQuery = StandardReachabilityQuerySynthesizer.builder().setActions(ImmutableSet.of(ForwardingAction.ACCEPT, ForwardingAction.NEIGHBOR_UNREACHABLE_OR_EXITS_NETWORK)).setHeaderSpace(new HeaderSpace()).setIngressNodeVrfs(ingressNodeVrfs).setFinalNodes(ImmutableSet.of()).setTransitNodes(ImmutableSet.of()).setNonTransitNodes(ImmutableSet.of()).build();
notAcceptQuery.setNegate(true);
SortedSet<Pair<String, String>> nodes = ImmutableSortedSet.of(new Pair<>(node, vrf));
// Presumably the composite job conjoins all three queries — TODO confirm in CompositeNodJob.
List<QuerySynthesizer> queries = ImmutableList.of(acceptQuery, notAcceptQuery, blacklistQuery);
return new CompositeNodJob(settings, synthesizers, queries, nodes, tag);
})).collect(Collectors.toList());
// TODO: maybe do something with nod answer element
Set<Flow> flows = computeCompositeNodOutput(jobs, new NodAnswerElement());
// Replay the witness flows through BOTH snapshots so the history shows the difference.
pushBaseEnvironment();
getDataPlanePlugin().processFlows(flows, loadDataPlane());
popEnvironment();
pushDeltaEnvironment();
getDataPlanePlugin().processFlows(flows, loadDataPlane());
popEnvironment();
AnswerElement answerElement = getHistory();
return answerElement;
}
Usage of org.batfish.datamodel.Configuration in the batfish project — class Batfish, method singleReachability:
// Single-snapshot reachability: builds one NoD query per chunk of (node, vrf) origination
// points and returns the flow history for all satisfying flows.
private AnswerElement singleReachability(ReachabilitySettings reachabilitySettings, ReachabilityQuerySynthesizer.Builder<?, ?> builder) {
Settings settings = getSettings();
String tag = getFlowTag(_testrigSettings);
Set<ForwardingAction> actions = reachabilitySettings.getActions();
boolean useCompression = reachabilitySettings.getUseCompression();
// specialized compression
/*
CompressDataPlaneResult compressionResult =
useCompression ? computeCompressedDataPlane(headerSpace) : null;
Map<String, Configuration> configurations =
useCompression ? compressionResult._compressedConfigs : loadConfigurations();
DataPlane dataPlane = useCompression ? compressionResult._compressedDataPlane : loadDataPlane();
*/
// general compression
Snapshot snapshot = getSnapshot();
Map<String, Configuration> configurations = useCompression ? loadCompressedConfigurations(snapshot) : loadConfigurations(snapshot);
DataPlane dataPlane = loadDataPlane(useCompression);
if (configurations == null) {
throw new BatfishException("error loading configurations");
}
if (dataPlane == null) {
throw new BatfishException("error loading data plane");
}
// Resolve the question's node/header constraints against the loaded configurations; any
// invalid setting short-circuits with an explanatory answer instead of throwing.
Set<String> activeIngressNodes;
Set<String> activeFinalNodes;
HeaderSpace headerSpace;
Set<String> transitNodes;
Set<String> nonTransitNodes;
int maxChunkSize;
try {
activeIngressNodes = reachabilitySettings.computeActiveIngressNodes(configurations);
activeFinalNodes = reachabilitySettings.computeActiveFinalNodes(configurations);
headerSpace = reachabilitySettings.getHeaderSpace();
transitNodes = reachabilitySettings.computeActiveTransitNodes(configurations);
nonTransitNodes = reachabilitySettings.computeActiveNonTransitNodes(configurations);
maxChunkSize = reachabilitySettings.getMaxChunkSize();
reachabilitySettings.validateTransitNodes(configurations);
} catch (InvalidReachabilitySettingsException e) {
return e.getInvalidSettingsAnswer();
}
// Every vrf of every active ingress node is a distinct origination point.
List<Pair<String, String>> originateNodeVrfs = activeIngressNodes.stream().flatMap(ingressNode -> configurations.get(ingressNode).getVrfs().keySet().stream().map(ingressVrf -> new Pair<>(ingressNode, ingressVrf))).collect(Collectors.toList());
// Chunk size balances per-job overhead against parallelism: at most maxChunkSize, roughly
// one chunk per available thread, and never below 1.
int chunkSize = Math.max(1, Math.min(maxChunkSize, originateNodeVrfs.size() / _settings.getAvailableThreads()));
// partition originateNodeVrfs into chunks
List<List<Pair<String, String>>> originateNodeVrfChunks = Lists.partition(originateNodeVrfs, chunkSize);
Synthesizer dataPlaneSynthesizer = synthesizeDataPlane(configurations, dataPlane, loadForwardingAnalysis(configurations, dataPlane), headerSpace, reachabilitySettings.getSpecialize());
// build query jobs: one NodJob per chunk, with that chunk's pairs grouped into a
// node -> {vrfs} multimap for the query builder.
List<NodJob> jobs = originateNodeVrfChunks.stream().map(ImmutableSortedSet::copyOf).map(nodeVrfs -> {
SortedMap<String, Set<String>> vrfsByNode = new TreeMap<>();
nodeVrfs.forEach(nodeVrf -> {
String node = nodeVrf.getFirst();
String vrf = nodeVrf.getSecond();
vrfsByNode.computeIfAbsent(node, key -> new TreeSet<>());
vrfsByNode.get(node).add(vrf);
});
ReachabilityQuerySynthesizer query = builder.setActions(actions).setHeaderSpace(headerSpace).setFinalNodes(activeFinalNodes).setIngressNodeVrfs(vrfsByNode).setTransitNodes(transitNodes).setNonTransitNodes(nonTransitNodes).setSrcNatted(reachabilitySettings.getSrcNatted()).build();
return new NodJob(settings, dataPlaneSynthesizer, query, nodeVrfs, tag, reachabilitySettings.getSpecialize());
}).collect(Collectors.toList());
// run jobs and get resulting flows
Set<Flow> flows = computeNodOutput(jobs);
// Witness flows are replayed against the UNCOMPRESSED data plane (loadDataPlane() with no
// argument) even when the query ran on the compressed one — presumably so the recorded
// history reflects the real network; TODO confirm.
getDataPlanePlugin().processFlows(flows, loadDataPlane());
AnswerElement answerElement = getHistory();
return answerElement;
}
Usage of org.batfish.datamodel.Configuration in the batfish project — class Batfish, method serializeIndependentConfigs:
/**
 * Converts vendor configurations into vendor-independent {@code Configuration} objects and
 * persists them along with the derived topologies and inferred node roles.
 *
 * @param vendorConfigPath location of the serialized vendor configurations to convert
 * @return the conversion {@code Answer}; includes the detailed conversion answer element only
 *     when verbose parsing is enabled
 */
private Answer serializeIndependentConfigs(Path vendorConfigPath) {
  Answer result = new Answer();
  ConvertConfigurationAnswerElement conversionRecord = new ConvertConfigurationAnswerElement();
  conversionRecord.setVersion(Version.getVersion());
  if (_settings.getVerboseParse()) {
    result.addAnswerElement(conversionRecord);
  }
  // Convert vendor configs; conversion warnings/errors accumulate in conversionRecord.
  Map<String, Configuration> configs = getConfigurations(vendorConfigPath, conversionRecord);
  // Raw testrig topology: computed, written to disk, then validated against the configs.
  Topology rawTopology = computeTestrigTopology(_testrigSettings.getTestRigPath(), configs);
  serializeAsJson(_testrigSettings.getTopologyPath(), rawTopology, "testrig topology");
  checkTopology(configs, rawTopology);
  // POJO view of the same topology, for external consumers.
  org.batfish.datamodel.pojo.Topology pojoView = org.batfish.datamodel.pojo.Topology.create(_testrigSettings.getName(), configs, rawTopology);
  serializeAsJson(_testrigSettings.getPojoTopologyPath(), pojoView, "testrig pojo topology");
  _storage.storeConfigurations(configs, conversionRecord, _testrigSettings.getName());
  // Environment adjustments (e.g. blacklists) are applied before the environment topology
  // is derived, so the two stay consistent.
  applyEnvironment(configs);
  Topology environmentTopology = computeEnvironmentTopology(configs);
  serializeAsJson(_testrigSettings.getEnvironmentSettings().getSerializedTopologyPath(), environmentTopology, "environment topology");
  NodeRoleSpecifier inferredRoles = inferNodeRoles(configs);
  serializeAsJson(_testrigSettings.getInferredNodeRolesPath(), inferredRoles, "inferred node roles");
  return result;
}
Usage of org.batfish.datamodel.Configuration in the batfish project — class CiscoConfiguration, method toInterface:
// Converts a vendor-specific Cisco Interface into the vendor-independent datamodel Interface,
// registering ACL/route-map references and attaching OSPF/IS-IS settings along the way.
private org.batfish.datamodel.Interface toInterface(Interface iface, Map<String, IpAccessList> ipAccessLists, Configuration c) {
String name = iface.getName();
org.batfish.datamodel.Interface newIface = new org.batfish.datamodel.Interface(name, c);
String vrfName = iface.getVrf();
// NOTE(review): computeIfAbsent silently creates a vendor Vrf when the interface names an
// undeclared vrf — confirm this is intended rather than an undefined-reference warning.
Vrf vrf = _vrfs.computeIfAbsent(vrfName, Vrf::new);
// Copy basic L2/L3 attributes straight across.
newIface.setDescription(iface.getDescription());
newIface.setActive(iface.getActive());
newIface.setAutoState(iface.getAutoState());
newIface.setVrf(c.getVrfs().get(vrfName));
newIface.setBandwidth(iface.getBandwidth());
// DHCP relay: a relay client inherits the device-wide server list; otherwise use the
// interface's own configured relay addresses.
if (iface.getDhcpRelayClient()) {
newIface.getDhcpRelayAddresses().addAll(_dhcpRelayServers);
} else {
newIface.getDhcpRelayAddresses().addAll(iface.getDhcpRelayAddresses());
}
newIface.setMtu(getInterfaceMtu(iface));
newIface.setOspfPointToPoint(iface.getOspfPointToPoint());
newIface.setProxyArp(iface.getProxyArp());
newIface.setSpanningTreePortfast(iface.getSpanningTreePortfast());
newIface.setSwitchport(iface.getSwitchport());
newIface.setDeclaredNames(ImmutableSortedSet.copyOf(iface.getDeclaredNames()));
// All prefixes is the combination of the interface prefix + any secondary prefixes.
ImmutableSet.Builder<InterfaceAddress> allPrefixes = ImmutableSet.builder();
if (iface.getAddress() != null) {
newIface.setAddress(iface.getAddress());
allPrefixes.add(iface.getAddress());
}
allPrefixes.addAll(iface.getSecondaryAddresses());
newIface.setAllAddresses(allPrefixes.build());
// OSPF: if the interface is assigned an area, register it (and all its prefixes) with the
// vrf's OSPF process; warn if no process exists.
Long ospfAreaLong = iface.getOspfArea();
if (ospfAreaLong != null) {
OspfProcess proc = vrf.getOspfProcess();
if (proc != null) {
if (iface.getOspfActive()) {
proc.getActiveInterfaceList().add(name);
}
if (iface.getOspfPassive()) {
proc.getPassiveInterfaceList().add(name);
}
for (InterfaceAddress address : newIface.getAllAddresses()) {
Prefix prefix = address.getPrefix();
OspfNetwork ospfNetwork = new OspfNetwork(prefix, ospfAreaLong);
proc.getNetworks().add(ospfNetwork);
}
} else {
_w.redFlag("Interface: '" + name + "' contains OSPF settings, but there is no OSPF process");
}
}
// IS-IS: translate the process level into per-level interface modes; a level that the
// process does not run at is explicitly UNSET.
boolean level1 = false;
boolean level2 = false;
IsisProcess isisProcess = vrf.getIsisProcess();
if (isisProcess != null) {
switch(isisProcess.getLevel()) {
case LEVEL_1:
level1 = true;
break;
case LEVEL_1_2:
level1 = true;
level2 = true;
break;
case LEVEL_2:
level2 = true;
break;
default:
throw new VendorConversionException("Invalid IS-IS level");
}
}
if (level1) {
newIface.setIsisL1InterfaceMode(iface.getIsisInterfaceMode());
} else {
newIface.setIsisL1InterfaceMode(IsisInterfaceMode.UNSET);
}
if (level2) {
newIface.setIsisL2InterfaceMode(iface.getIsisInterfaceMode());
} else {
newIface.setIsisL2InterfaceMode(IsisInterfaceMode.UNSET);
}
newIface.setIsisCost(iface.getIsisCost());
newIface.setOspfCost(iface.getOspfCost());
newIface.setOspfDeadInterval(iface.getOspfDeadInterval());
newIface.setOspfHelloMultiplier(iface.getOspfHelloMultiplier());
// switch settings
newIface.setAccessVlan(iface.getAccessVlan());
newIface.setNativeVlan(iface.getNativeVlan());
newIface.setSwitchportMode(iface.getSwitchportMode());
SwitchportEncapsulationType encapsulation = iface.getSwitchportTrunkEncapsulation();
if (encapsulation == null) {
// no encapsulation set, so use default..
// TODO: check if this is OK
encapsulation = SwitchportEncapsulationType.DOT1Q;
}
newIface.setSwitchportTrunkEncapsulation(encapsulation);
newIface.addAllowedRanges(iface.getAllowedVlans());
// Incoming ACL: record an undefined-reference if the named list does not exist; otherwise
// mark the (extended and/or standard) list as referenced by this interface.
String incomingFilterName = iface.getIncomingFilter();
if (incomingFilterName != null) {
int incomingFilterLine = iface.getIncomingFilterLine();
IpAccessList incomingFilter = ipAccessLists.get(incomingFilterName);
if (incomingFilter == null) {
undefined(CiscoStructureType.IP_ACCESS_LIST, incomingFilterName, CiscoStructureUsage.INTERFACE_INCOMING_FILTER, incomingFilterLine);
} else {
String msg = "incoming acl for interface: " + iface.getName();
ExtendedAccessList incomingExtendedAccessList = _extendedAccessLists.get(incomingFilterName);
if (incomingExtendedAccessList != null) {
incomingExtendedAccessList.getReferers().put(iface, msg);
}
StandardAccessList incomingStandardAccessList = _standardAccessLists.get(incomingFilterName);
if (incomingStandardAccessList != null) {
incomingStandardAccessList.getReferers().put(iface, msg);
}
}
newIface.setIncomingFilter(incomingFilter);
}
// Outgoing ACL: same handling as the incoming filter above.
String outgoingFilterName = iface.getOutgoingFilter();
if (outgoingFilterName != null) {
int outgoingFilterLine = iface.getOutgoingFilterLine();
IpAccessList outgoingFilter = ipAccessLists.get(outgoingFilterName);
if (outgoingFilter == null) {
undefined(CiscoStructureType.IP_ACCESS_LIST, outgoingFilterName, CiscoStructureUsage.INTERFACE_OUTGOING_FILTER, outgoingFilterLine);
} else {
String msg = "outgoing acl for interface: " + iface.getName();
ExtendedAccessList outgoingExtendedAccessList = _extendedAccessLists.get(outgoingFilterName);
if (outgoingExtendedAccessList != null) {
outgoingExtendedAccessList.getReferers().put(iface, msg);
}
StandardAccessList outgoingStandardAccessList = _standardAccessLists.get(outgoingFilterName);
if (outgoingStandardAccessList != null) {
outgoingStandardAccessList.getReferers().put(iface, msg);
}
}
newIface.setOutgoingFilter(outgoingFilter);
}
List<CiscoSourceNat> origSourceNats = iface.getSourceNats();
if (origSourceNats != null) {
// Process each of the CiscoSourceNats:
// 1) Collect references to ACLs and NAT pools.
// 2) For valid CiscoSourceNat rules, add them to the newIface source NATs list.
newIface.setSourceNats(origSourceNats.stream().map(nat -> processSourceNat(nat, iface, ipAccessLists)).filter(Objects::nonNull).collect(ImmutableList.toImmutableList()));
}
// Policy routing: resolve the route-map name, recording undefined references, but set the
// policy NAME on the new interface either way (matching original behavior).
String routingPolicyName = iface.getRoutingPolicy();
if (routingPolicyName != null) {
int routingPolicyLine = iface.getRoutingPolicyLine();
RouteMap routingPolicyRouteMap = _routeMaps.get(routingPolicyName);
if (routingPolicyRouteMap == null) {
undefined(CiscoStructureType.ROUTE_MAP, routingPolicyName, CiscoStructureUsage.INTERFACE_POLICY_ROUTING_MAP, routingPolicyLine);
} else {
routingPolicyRouteMap.getReferers().put(iface, "routing policy for interface: " + iface.getName());
}
newIface.setRoutingPolicy(routingPolicyName);
}
return newIface;
}
Usage of org.batfish.datamodel.Configuration in the batfish project — class Batfish, method initRemoteRipNeighbors:
@Override
public void initRemoteRipNeighbors(Map<String, Configuration> configurations, Map<Ip, Set<String>> ipOwners, Topology topology) {
for (Entry<String, Configuration> e : configurations.entrySet()) {
String hostname = e.getKey();
Configuration c = e.getValue();
for (Entry<String, Vrf> e2 : c.getVrfs().entrySet()) {
Vrf vrf = e2.getValue();
RipProcess proc = vrf.getRipProcess();
if (proc != null) {
proc.setRipNeighbors(new TreeMap<>());
String vrfName = e2.getKey();
for (String ifaceName : proc.getInterfaces()) {
Interface iface = vrf.getInterfaces().get("ifaceName");
SortedSet<Edge> ifaceEdges = topology.getInterfaceEdges().get(new NodeInterfacePair(hostname, ifaceName));
boolean hasNeighbor = false;
Ip localIp = iface.getAddress().getIp();
if (ifaceEdges != null) {
for (Edge edge : ifaceEdges) {
if (edge.getNode1().equals(hostname)) {
String remoteHostname = edge.getNode2();
String remoteIfaceName = edge.getInt2();
Configuration remoteNode = configurations.get(remoteHostname);
Interface remoteIface = remoteNode.getInterfaces().get(remoteIfaceName);
Vrf remoteVrf = remoteIface.getVrf();
String remoteVrfName = remoteVrf.getName();
RipProcess remoteProc = remoteVrf.getRipProcess();
if (remoteProc != null) {
if (remoteProc.getRipNeighbors() == null) {
remoteProc.setRipNeighbors(new TreeMap<>());
}
if (remoteProc.getInterfaces().contains(remoteIfaceName)) {
Ip remoteIp = remoteIface.getAddress().getIp();
Pair<Ip, Ip> localKey = new Pair<>(localIp, remoteIp);
RipNeighbor neighbor = proc.getRipNeighbors().get(localKey);
if (neighbor == null) {
hasNeighbor = true;
// initialize local neighbor
neighbor = new RipNeighbor(localKey);
neighbor.setVrf(vrfName);
neighbor.setOwner(c);
neighbor.setInterface(iface);
proc.getRipNeighbors().put(localKey, neighbor);
// initialize remote neighbor
Pair<Ip, Ip> remoteKey = new Pair<>(remoteIp, localIp);
RipNeighbor remoteNeighbor = new RipNeighbor(remoteKey);
remoteNeighbor.setVrf(remoteVrfName);
remoteNeighbor.setOwner(remoteNode);
remoteNeighbor.setInterface(remoteIface);
remoteProc.getRipNeighbors().put(remoteKey, remoteNeighbor);
// link neighbors
neighbor.setRemoteRipNeighbor(remoteNeighbor);
remoteNeighbor.setRemoteRipNeighbor(neighbor);
}
}
}
}
}
}
if (!hasNeighbor) {
Pair<Ip, Ip> key = new Pair<>(localIp, Ip.ZERO);
RipNeighbor neighbor = new RipNeighbor(key);
neighbor.setVrf(vrfName);
neighbor.setOwner(c);
neighbor.setInterface(iface);
proc.getRipNeighbors().put(key, neighbor);
}
}
}
}
}
}
End of aggregated usage examples.