Use of com.graphhopper.routing.subnetwork.TarjansSCCAlgorithm in project graphhopper by graphhopper.
From the class LandmarkStorage, method createLandmarks.
/**
* This method calculates the landmarks and initial weightings to & from them.
*/
public void createLandmarks() {
    if (isInitialized())
        throw new IllegalStateException("Initialize the landmark storage only once!");

    // fill 'from' and 'to' weights with maximum value
    long maxBytes = (long) graph.getNodes() * LM_ROW_LENGTH;
    this.landmarkWeightDA.create(2000);
    this.landmarkWeightDA.ensureCapacity(maxBytes);

    for (long pointer = 0; pointer < maxBytes; pointer += 2) {
        landmarkWeightDA.setShort(pointer, (short) SHORT_INFINITY);
    }
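    // Row layout illustration (assumption, not from this snippet: LM_ROW_LENGTH == landmarks * 4,
    // i.e. per node one 2-byte 'from' and one 2-byte 'to' weight for each landmark, with the 'to'
    // weight at offset 2; SHORT_INFINITY marks "not yet calculated"). Under that assumption,
    // reading the 'to' weight of landmark lmIdx for a node n would look roughly like:
    //   landmarkWeightDA.getShort((long) n * LM_ROW_LENGTH + lmIdx * 4 + 2);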
    String additionalInfo = "";
    // guess the factor
    if (factor <= 0) {
        // A 'factor' is necessary to store a weight in just a short value without losing too much precision.
        // This factor is rather delicate to pick; we estimate it from the maximum distance across the graph bounds.
        // For small areas we use max_bounds_dist * X, otherwise we use a big fixed value for this distance.
        // Picking the distance too big for small areas can lead to (slightly) suboptimal routes because the
        // rounding errors get too big, but picking it too small is dangerous for performance:
        // e.g. for Germany at least 1500km is important; with just 1000km, queries run at least twice as slow.
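        // Worked illustration (assumption, not from this snippet: setMaximumWeight derives
        // factor ≈ maxWeight / 65535 so that maxWeight still fits into the unsigned short range):
        //   maxWeight = 100_000  =>  factor ≈ 1.526
        //   a weight of 12_345 is then stored as (short) (12_345 / factor) ≈ 8_090
        //   and decoded as 8_090 * factor ≈ 12_344.5, i.e. the error stays below one factor unit.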
        BBox bounds = graph.getBounds();
        double distanceInMeter = Helper.DIST_EARTH.calcDist(bounds.maxLat, bounds.maxLon, bounds.minLat, bounds.minLon) * 7;
        if (distanceInMeter > 50_000 * 7 || /* for tests and convenience we do for now: */
                !bounds.isValid())
            distanceInMeter = 30_000_000;

        double maxWeight = weighting.getMinWeight(distanceInMeter);
        setMaximumWeight(maxWeight);
        additionalInfo = ", maxWeight:" + maxWeight + ", from max distance:" + distanceInMeter / 1000f + "km";
    }
LOGGER.info("init landmarks for subnetworks with node count greater than " + minimumNodes + " with factor:" + factor + additionalInfo);
// special subnetwork 0
int[] empty = new int[landmarks];
Arrays.fill(empty, UNSET_SUBNETWORK);
landmarkIDs.add(empty);
byte[] subnetworks = new byte[graph.getNodes()];
Arrays.fill(subnetworks, (byte) UNSET_SUBNETWORK);
EdgeFilter tarjanFilter = new DefaultEdgeFilter(encoder, false, true);
IntHashSet blockedEdges = new IntHashSet();
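    // (assumption, not stated in this snippet: the two booleans are DefaultEdgeFilter's
    // 'in'/'out' flags, so this filter accepts only edges traversable in forward direction,
    // which is what a strongly connected component search on a directed graph needs)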
    // the ruleLookup splits certain areas from each other, but without making this a permanent change,
    // so that other algorithms can still route through these regions
    if (ruleLookup != null && ruleLookup.size() > 0) {
        StopWatch sw = new StopWatch().start();
        blockedEdges = findBorderEdgeIds(ruleLookup);
        tarjanFilter = new BlockedEdgesFilter(encoder, false, true, blockedEdges);
        LOGGER.info("Made " + blockedEdges.size() + " edges inaccessible. Calculated country cut in " + sw.stop().getSeconds() + "s, " + Helper.getMemInfo());
    }
    StopWatch sw = new StopWatch().start();
    // we cannot reuse the components calculated in PrepareRoutingSubnetworks, as the edge IDs changed in between (graph.optimize was called);
    // also, with many oneways a plain subnetwork search causes bigger problems, so the strongly connected components are calculated from scratch here
    TarjansSCCAlgorithm tarjanAlgo = new TarjansSCCAlgorithm(graph, tarjanFilter, true);
    List<IntArrayList> graphComponents = tarjanAlgo.findComponents();
    LOGGER.info("Calculated tarjan subnetworks in " + sw.stop().getSeconds() + "s, " + Helper.getMemInfo());

    EdgeExplorer tmpExplorer = graph.createEdgeExplorer(new RequireBothDirectionsEdgeFilter(encoder));
    int nodes = 0;
    for (IntArrayList subnetworkIds : graphComponents) {
        nodes += subnetworkIds.size();
        if (subnetworkIds.size() < minimumNodes)
            continue;

        int index = subnetworkIds.size() - 1;
        // ensure that the start node is reachable from both directions and not yet assigned to a subnetwork
        for (; index >= 0; index--) {
            int nextStartNode = subnetworkIds.get(index);
            if (subnetworks[nextStartNode] == UNSET_SUBNETWORK && GHUtility.count(tmpExplorer.setBaseNode(nextStartNode)) > 0) {
                GHPoint p = createPoint(graph, nextStartNode);
                LOGGER.info("start node: " + nextStartNode + " (" + p + ") subnetwork size: " + subnetworkIds.size() + ", " + Helper.getMemInfo() + ((ruleLookup == null) ? "" : " area:" + ruleLookup.lookupRule(p).getId()));
                if (createLandmarksForSubnetwork(nextStartNode, subnetworks, blockedEdges))
                    break;
            }
        }
        if (index < 0)
            LOGGER.warn("next start node not found in big enough network of size " + subnetworkIds.size() + ", first element is " + subnetworkIds.get(0) + ", " + createPoint(graph, subnetworkIds.get(0)));
    }
    int subnetworkCount = landmarkIDs.size();
    // store all landmark node IDs, 4 bytes per ID (see setInt below); the factor itself goes into the header as one int
    this.landmarkWeightDA.ensureCapacity(maxBytes /* landmark weights */
            + subnetworkCount * landmarks * 4L /* landmark mapping per subnetwork */);

    // calculate the offset pointing into the landmark mapping
    long bytePos = maxBytes;
    for (int[] lmIDs : landmarkIDs) {
        for (int lmNodeId : lmIDs) {
            landmarkWeightDA.setInt(bytePos, lmNodeId);
            bytePos += 4L;
        }
    }
    landmarkWeightDA.setHeader(0 * 4, graph.getNodes());
    landmarkWeightDA.setHeader(1 * 4, landmarks);
    landmarkWeightDA.setHeader(2 * 4, subnetworkCount);
    if (factor * DOUBLE_MLTPL > Integer.MAX_VALUE)
        throw new UnsupportedOperationException("landmark weight factor cannot be bigger than Integer.MAX_VALUE " + factor * DOUBLE_MLTPL);
    landmarkWeightDA.setHeader(3 * 4, (int) Math.round(factor * DOUBLE_MLTPL));
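    // Resulting header layout as written above (assumption: DOUBLE_MLTPL is the fixed-point
    // multiplier used to store the double 'factor' in an int):
    //   header int 0: node count
    //   header int 1: number of landmarks
    //   header int 2: subnetwork count
    //   header int 3: Math.round(factor * DOUBLE_MLTPL)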
    // serialize fast byte[] into DataAccess
    subnetworkStorage.create(graph.getNodes());
    for (int nodeId = 0; nodeId < subnetworks.length; nodeId++) {
        subnetworkStorage.setSubnetwork(nodeId, subnetworks[nodeId]);
    }

    LOGGER.info("Finished landmark creation. Subnetwork node count sum " + nodes + " vs. nodes " + graph.getNodes());
    initialized = true;
}
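For context, the TarjansSCCAlgorithm call pattern from createLandmarks() can be reproduced on its own. Below is a minimal sketch, assuming an already loaded GraphHopperStorage named graph, a FlagEncoder named encoder and a minimumNodes threshold; these names mirror the fields used in the snippet above and are illustrative, not a complete program.

import java.util.List;
import com.carrotsearch.hppc.IntArrayList;
import com.graphhopper.routing.subnetwork.TarjansSCCAlgorithm;
import com.graphhopper.routing.util.DefaultEdgeFilter;
import com.graphhopper.routing.util.EdgeFilter;

// forward-only edge filter, mirroring the tarjanFilter constructed above
EdgeFilter edgeFilter = new DefaultEdgeFilter(encoder, false, true);
// the third argument matches the 'true' passed in createLandmarks()
TarjansSCCAlgorithm tarjan = new TarjansSCCAlgorithm(graph, edgeFilter, true);
List<IntArrayList> components = tarjan.findComponents();
for (IntArrayList component : components) {
    // skip components that are too small, as createLandmarks() does via minimumNodes
    if (component.size() >= minimumNodes)
        System.out.println("component of size " + component.size() + ", first node: " + component.get(0));
}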