Use of org.batfish.datamodel.DataPlane in project batfish (batfish/batfish).
From class Batfish, method saveDataPlane:
/**
 * Persists a computed data plane: stores it in the appropriate in-memory cache, then
 * serializes both the data plane and its answer element to disk.
 *
 * @param dataPlane the data plane to persist
 * @param answerElement the answer element produced while computing the data plane
 * @param compressed whether this is the compressed variant (selects the paths and cache)
 */
private void saveDataPlane(DataPlane dataPlane, DataPlaneAnswerElement answerElement, boolean compressed) {
  Path dpPath;
  Path answerPath;
  Cache<TestrigSettings, DataPlane> dpCache;
  if (compressed) {
    dpPath = _testrigSettings.getEnvironmentSettings().getCompressedDataPlanePath();
    answerPath = _testrigSettings.getEnvironmentSettings().getCompressedDataPlaneAnswerPath();
    dpCache = _cachedCompressedDataPlanes;
  } else {
    dpPath = _testrigSettings.getEnvironmentSettings().getDataPlanePath();
    answerPath = _testrigSettings.getEnvironmentSettings().getDataPlaneAnswerPath();
    dpCache = _cachedDataPlanes;
  }
  dpCache.put(_testrigSettings, dataPlane);
  _logger.resetTimer();
  newBatch("Writing data plane to disk", 0);
  try (ActiveSpan writeDataplane = GlobalTracer.get().buildSpan("Writing data plane").startActive()) {
    // Reference the span so the compiler does not flag it as unused.
    assert writeDataplane != null;
    serializeObject(dataPlane, dpPath);
    serializeObject(answerElement, answerPath);
  }
  _logger.printElapsedTime();
}
Use of org.batfish.datamodel.DataPlane in project batfish (batfish/batfish).
From class Batfish, method singleReachability:
/**
 * Answers a reachability query by synthesizing NoD (network-of-datalog) jobs over the
 * (optionally compressed) data plane, running them in chunks, and processing the
 * resulting flows against the concrete data plane.
 *
 * @param reachabilitySettings query parameters (actions, header space, ingress/final/
 *     transit node constraints, chunking, compression)
 * @param builder factory for the per-chunk reachability query synthesizer
 * @return the flow history answer, or an invalid-settings answer if validation fails
 */
private AnswerElement singleReachability(
    ReachabilitySettings reachabilitySettings,
    ReachabilityQuerySynthesizer.Builder<?, ?> builder) {
  Settings settings = getSettings();
  String tag = getFlowTag(_testrigSettings);
  Set<ForwardingAction> actions = reachabilitySettings.getActions();
  boolean useCompression = reachabilitySettings.getUseCompression();
  // specialized compression
  /*
  CompressDataPlaneResult compressionResult =
      useCompression ? computeCompressedDataPlane(headerSpace) : null;
  Map<String, Configuration> configurations =
      useCompression ? compressionResult._compressedConfigs : loadConfigurations();
  DataPlane dataPlane = useCompression ? compressionResult._compressedDataPlane : loadDataPlane();
  */
  // general compression
  Snapshot snapshot = getSnapshot();
  Map<String, Configuration> configurations =
      useCompression ? loadCompressedConfigurations(snapshot) : loadConfigurations(snapshot);
  DataPlane dataPlane = loadDataPlane(useCompression);
  if (configurations == null) {
    throw new BatfishException("error loading configurations");
  }
  if (dataPlane == null) {
    throw new BatfishException("error loading data plane");
  }
  Set<String> activeIngressNodes;
  Set<String> activeFinalNodes;
  HeaderSpace headerSpace;
  Set<String> transitNodes;
  Set<String> nonTransitNodes;
  int maxChunkSize;
  try {
    activeIngressNodes = reachabilitySettings.computeActiveIngressNodes(configurations);
    activeFinalNodes = reachabilitySettings.computeActiveFinalNodes(configurations);
    headerSpace = reachabilitySettings.getHeaderSpace();
    transitNodes = reachabilitySettings.computeActiveTransitNodes(configurations);
    nonTransitNodes = reachabilitySettings.computeActiveNonTransitNodes(configurations);
    maxChunkSize = reachabilitySettings.getMaxChunkSize();
    reachabilitySettings.validateTransitNodes(configurations);
  } catch (InvalidReachabilitySettingsException e) {
    return e.getInvalidSettingsAnswer();
  }
  // Enumerate every (ingress node, ingress VRF) origination point.
  List<Pair<String, String>> originateNodeVrfs =
      activeIngressNodes
          .stream()
          .flatMap(
              ingressNode ->
                  configurations
                      .get(ingressNode)
                      .getVrfs()
                      .keySet()
                      .stream()
                      .map(ingressVrf -> new Pair<>(ingressNode, ingressVrf)))
          .collect(Collectors.toList());
  // Chunk so each available thread gets work, capped by maxChunkSize, with at least 1 per chunk.
  int chunkSize =
      Math.max(
          1, Math.min(maxChunkSize, originateNodeVrfs.size() / _settings.getAvailableThreads()));
  // partition originateNodeVrfs into chunks
  List<List<Pair<String, String>>> originateNodeVrfChunks =
      Lists.partition(originateNodeVrfs, chunkSize);
  Synthesizer dataPlaneSynthesizer =
      synthesizeDataPlane(
          configurations,
          dataPlane,
          loadForwardingAnalysis(configurations, dataPlane),
          headerSpace,
          reachabilitySettings.getSpecialize());
  // build query jobs
  List<NodJob> jobs =
      originateNodeVrfChunks
          .stream()
          .map(ImmutableSortedSet::copyOf)
          .map(
              nodeVrfs -> {
                SortedMap<String, Set<String>> vrfsByNode = new TreeMap<>();
                // Use computeIfAbsent's return value directly instead of a redundant
                // second get() lookup on the same key.
                nodeVrfs.forEach(
                    nodeVrf ->
                        vrfsByNode
                            .computeIfAbsent(nodeVrf.getFirst(), key -> new TreeSet<>())
                            .add(nodeVrf.getSecond()));
                ReachabilityQuerySynthesizer query =
                    builder
                        .setActions(actions)
                        .setHeaderSpace(headerSpace)
                        .setFinalNodes(activeFinalNodes)
                        .setIngressNodeVrfs(vrfsByNode)
                        .setTransitNodes(transitNodes)
                        .setNonTransitNodes(nonTransitNodes)
                        .setSrcNatted(reachabilitySettings.getSrcNatted())
                        .build();
                return new NodJob(
                    settings,
                    dataPlaneSynthesizer,
                    query,
                    nodeVrfs,
                    tag,
                    reachabilitySettings.getSpecialize());
              })
          .collect(Collectors.toList());
  // run jobs and get resulting flows
  Set<Flow> flows = computeNodOutput(jobs);
  // Flows are traced on the concrete (uncompressed) data plane even if the query
  // itself ran on the compressed one.
  getDataPlanePlugin().processFlows(flows, loadDataPlane());
  AnswerElement answerElement = getHistory();
  return answerElement;
}
Use of org.batfish.datamodel.DataPlane in project batfish (batfish/batfish).
From class Batfish, method loadDataPlane:
/**
 * Returns the (possibly compressed) data plane for the current testrig, consulting the
 * in-memory cache first and falling back to deserialization from disk.
 *
 * @param compressed whether to load the compressed variant (selects the path and cache)
 * @return the cached or freshly deserialized data plane
 */
private DataPlane loadDataPlane(boolean compressed) {
  Cache<TestrigSettings, DataPlane> dpCache =
      compressed ? _cachedCompressedDataPlanes : _cachedDataPlanes;
  Path dpPath =
      compressed
          ? _testrigSettings.getEnvironmentSettings().getCompressedDataPlanePath()
          : _testrigSettings.getEnvironmentSettings().getDataPlanePath();
  DataPlane dataPlane = dpCache.getIfPresent(_testrigSettings);
  if (dataPlane != null) {
    return dataPlane;
  }
  /*
   * Loading the answer element triggers repair if necessary, after which the data
   * plane should exist. It may still be missing from the cache (e.g. when no repair
   * ran), in which case we deserialize it from disk and cache it ourselves.
   */
  loadDataPlaneAnswerElement(compressed);
  dataPlane = dpCache.getIfPresent(_testrigSettings);
  if (dataPlane == null) {
    newBatch("Loading data plane from disk", 0);
    dataPlane = deserializeObject(dpPath, DataPlane.class);
    dpCache.put(_testrigSettings, dataPlane);
  }
  return dataPlane;
}
Use of org.batfish.datamodel.DataPlane in project batfish (batfish/batfish).
From class BatfishCompressionTest, method testCompressionFibs_compressibleNetwork:
/**
 * Test the following invariant: if a FIB appears on concrete router “r”, then a corresponding
 * abstract FIB appears on one of these representatives. For example, if there is a concrete FIB
 * from C to D, then there should be an abstract FIB from A to B, where A is in representatives(C)
 * and B is in representatives(D).
 */
@Test
public void testCompressionFibs_compressibleNetwork() throws IOException {
  DataPlane origDataPlane = getDataPlane(compressibleNetwork());
  SortedMap<String, Configuration> compressedConfigs =
      compressNetwork(compressibleNetwork(), new HeaderSpace());
  DataPlane compressedDataPlane = getDataPlane(compressedConfigs);
  SortedMap<String, SortedMap<String, GenericRib<AbstractRoute>>> origRibs =
      origDataPlane.getRibs();
  SortedMap<String, SortedMap<String, GenericRib<AbstractRoute>>> compressedRibs =
      compressedDataPlane.getRibs();
  /* Compression removed a node */
  assertThat(compressedConfigs.entrySet(), hasSize(2));
  compressedConfigs.values().forEach(BatfishCompressionTest::assertIsCompressedConfig);
  // Walk every (hostname, vrf) RIB in the compressed data plane.
  for (String hostname : compressedRibs.keySet()) {
    SortedMap<String, GenericRib<AbstractRoute>> compressedRibsByVrf =
        compressedRibs.get(hostname);
    for (String vrf : compressedRibsByVrf.keySet()) {
      Set<AbstractRoute> origRoutes = origRibs.get(hostname).get(vrf).getRoutes();
      Set<AbstractRoute> compressedRoutes = compressedRibsByVrf.get(vrf).getRoutes();
      for (AbstractRoute route : compressedRoutes) {
        /* Every compressed route should appear in original RIB */
        assertThat(origRoutes, hasItem(route));
      }
    }
  }
}
End of aggregated usages.