Use of org.opentripplanner.analyst.cluster.ResultEnvelope in project OpenTripPlanner by opentripplanner.
The class PropagatedTimesStore, method makeIsochronesForVertices.
/**
 * This bypasses a bunch of conversion and copy steps and just makes the isochrones.
 * This assumes that the target indexes in this router/propagatedTimesStore are vertex indexes, not pointset indexes.
 * TODO parameter for a pointset or a vertex lookup table, so we can handle both.
 */
public ResultEnvelope makeIsochronesForVertices() {
    ResultEnvelope envelope = new ResultEnvelope();
    envelope.bestCase = makeIsochroneForVertices(mins);
    envelope.avgCase = makeIsochroneForVertices(avgs);
    envelope.worstCase = makeIsochroneForVertices(maxs);
    return envelope;
}
Use of org.opentripplanner.analyst.cluster.ResultEnvelope in project OpenTripPlanner by opentripplanner.
The class RepeatedRaptorComparison, method main.
public static void main(String... args) {
    if (args.length == 0) {
        System.err.println("too few arguments.");
        return;
    }
    // build a graph
    File graphDir = new File(args[0]);
    Graph graph = buildGraph(graphDir);
    DB comparisonDb = null;
    BTreeMap<Fun.Tuple3<String, String, ResultEnvelope.Which>, Integer> comparison = null;
    // open the comparison file, if we have one.
    if (args.length > 1) {
        comparisonDb = DBMaker.newFileDB(new File(args[1]))
                .readOnly()
                .transactionDisable()
                .closeOnJvmShutdown()
                .cacheSize(24)
                .asyncWriteEnable()
                .make();
        comparison = comparisonDb.getTreeMap("results");
    }
    String outputName = args.length > 2 ? args[2] : MavenVersion.VERSION.commit + ".db";
    DB outputDb = DBMaker.newFileDB(new File(outputName))
            .transactionDisable()
            .cacheSize(48)
            .closeOnJvmShutdown()
            .make();
    final BTreeMap<Fun.Tuple3<String, String, ResultEnvelope.Which>, Integer> output = outputDb.createTreeMap("results")
            .valueSerializer(Serializer.JAVA)
            .makeOrGet();
    // if we have a comparison file, get the pointset from it. Otherwise choose some vertices.
    Collection<String> vertexLabels;
    PointSet pset;
    if (comparison != null) {
        // kludge: the pointset is stored in its own map in the db.
        pset = comparisonDb.<String, PointSet>getTreeMap("pointset").get("pointset");
    } else {
        // choose some vertices
        List<Vertex> vertices = graph.getVertices().stream()
                .filter(v -> v.getLabel().startsWith("osm:node:"))
                .limit(1000)
                .collect(Collectors.toList());
        // make a pointset
        pset = new PointSet(vertices.size());
        int featIdx = 0;
        for (Vertex v : vertices) {
            PointFeature pf = new PointFeature();
            pf.setId(v.getLabel());
            pf.setLat(v.getLat() + OFFSET_Y);
            pf.setLon(v.getLon() + OFFSET_X);
            pset.addFeature(pf, featIdx++);
        }
        outputDb.createTreeMap("pointset").<String, PointSet>make().put("pointset", pset);
    }
    SampleSet ss = new SampleSet(pset, graph.getSampleFactory());
    final BTreeMap<Fun.Tuple3<String, String, ResultEnvelope.Which>, Integer> comparisonResults = comparison;
    Histogram bestCaseHisto = new Histogram("Best case");
    Histogram avgCaseHisto = new Histogram("Average");
    Histogram worstCaseHisto = new Histogram("Worst case");
    ProfileRequest template = new ProfileRequest();
    template.accessModes = new QualifiedModeSet("WALK");
    template.analyst = true;
    template.maxWalkTime = 20 * 60;
    template.walkSpeed = 1.3f;
    template.fromTime = 7 * 3600;
    template.toTime = 9 * 3600;
    template.date = new LocalDate(2015, 8, 4);
    RaptorWorkerData data = RepeatedRaptorProfileRouter.getRaptorWorkerData(template, graph, ss, new TaskStatistics());
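    // Note (editorial): the worker data is built once from the template request and shared by every
    // origin in the parallel stream below; presumably this is safe because it is only read during routing.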
    // do the computation and comparison
    IntStream.range(0, pset.featureCount()).parallel().forEach(idx -> {
        if (idx % 100 == 0)
            System.out.println(idx + " points complete");
        Coordinate coord = pset.getCoordinate(idx);
        String origin = pset.getFeature(idx).getId();
        ProfileRequest req;
        try {
            req = template.clone();
        } catch (CloneNotSupportedException e) {
            /* can't happen */
            throw new RuntimeException(e);
        }
        req.maxWalkTime = 20 * 60;
        req.fromLat = req.toLat = coord.y;
        req.fromLon = req.toLon = coord.x;
        // 7 to 9 AM
        req.fromTime = 7 * 3600;
        req.toTime = 9 * 3600;
        req.transitModes = new TraverseModeSet("TRANSIT");
        RepeatedRaptorProfileRouter rrpr = new RepeatedRaptorProfileRouter(graph, req, ss);
        rrpr.raptorWorkerData = data;
        rrpr.includeTimes = true;
        // TODO we really want to disable both isochrone and accessibility generation here.
        // Because a sampleSet is provided it's going to make accessibility information (not isochrones).
        ResultEnvelope results = new ResultEnvelope();
        try {
            results = rrpr.route();
        } catch (Exception e) {
            LOG.error("Exception during routing", e);
            return;
        }
        for (ResultEnvelope.Which which : new ResultEnvelope.Which[] { ResultEnvelope.Which.BEST_CASE, ResultEnvelope.Which.AVERAGE, ResultEnvelope.Which.WORST_CASE }) {
            Histogram histogram;
            ResultSet resultSet;
            switch (which) {
                case BEST_CASE:
                    histogram = bestCaseHisto;
                    resultSet = results.bestCase;
                    break;
                case WORST_CASE:
                    histogram = worstCaseHisto;
                    resultSet = results.worstCase;
                    break;
                case AVERAGE:
                    histogram = avgCaseHisto;
                    resultSet = results.avgCase;
                    break;
                default:
                    histogram = null;
                    resultSet = null;
            }
            // comparison.
            for (int i = 0; i < resultSet.times.length; i++) {
                int time = resultSet.times[i];
                // TODO this is creating a PointFeature obj to hold the id at each call
                // Cache?
                String dest = pset.getFeature(i).getId();
                Fun.Tuple3<String, String, ResultEnvelope.Which> key = new Fun.Tuple3<>(origin, dest, which);
                output.put(key, time);
                if (time < 0) {
                    LOG.error("Path from {} to {} has negative time {}", origin, dest, time);
                }
                if (comparisonResults != null) {
                    int time0 = comparisonResults.get(key);
                    int deltaMinutes;
                    if (time0 == RaptorWorker.UNREACHED && time != RaptorWorker.UNREACHED)
                        deltaMinutes = (time / 60) - 120;
                    else if (time == RaptorWorker.UNREACHED && time0 != RaptorWorker.UNREACHED)
                        deltaMinutes = 120 - (time0 / 60);
                    else
                        deltaMinutes = (time - time0) / 60;
                    // histograms are not threadsafe
                    synchronized (histogram) {
                        histogram.add(deltaMinutes);
                    }
                }
            }
        }
    });
    output.close();
    if (comparisonDb != null) {
        comparisonDb.close();
        bestCaseHisto.displayHorizontal();
        System.out.println("mean: " + bestCaseHisto.mean());
        avgCaseHisto.displayHorizontal();
        System.out.println("mean: " + avgCaseHisto.mean());
        worstCaseHisto.displayHorizontal();
        System.out.println("mean: " + worstCaseHisto.mean());
    }
}
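The output database written above is keyed by the tuple (origin id, destination id, which case), so a later run of the tool can pass it as the comparison argument. A minimal sketch of reading one travel time back out of such a file, assuming the same MapDB calls and the "results" map name used in main() above (the file path and ids are hypothetical):

// Sketch: open a previously written results file read-only and look up one origin/destination pair.
// The path and the ids are illustrative; the API calls mirror those in main() above.
DB db = DBMaker.newFileDB(new File("previous-run.db")).readOnly().transactionDisable().make();
BTreeMap<Fun.Tuple3<String, String, ResultEnvelope.Which>, Integer> results = db.getTreeMap("results");
Integer seconds = results.get(new Fun.Tuple3<>("osm:node:123", "osm:node:456", ResultEnvelope.Which.AVERAGE));
if (seconds != null && seconds != RaptorWorker.UNREACHED) {
    System.out.println("average travel time: " + (seconds / 60) + " minutes");
}
db.close();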
Use of org.opentripplanner.analyst.cluster.ResultEnvelope in project OpenTripPlanner by opentripplanner.
The class PropagatedTimesStore, method makeResults.
/**
 * Make a ResultEnvelope directly from a given SampleSet.
 * The RaptorWorkerData must have been constructed from the same SampleSet.
 */
public ResultEnvelope makeResults(SampleSet ss, boolean includeTimes, boolean includeHistograms, boolean includeIsochrones) {
    ResultEnvelope envelope = new ResultEnvelope();
    // max times == worst case accessibility
    envelope.worstCase = new ResultSet(maxs, ss.pset, includeTimes, includeHistograms, includeIsochrones);
    envelope.avgCase = new ResultSet(avgs, ss.pset, includeTimes, includeHistograms, includeIsochrones);
    envelope.bestCase = new ResultSet(mins, ss.pset, includeTimes, includeHistograms, includeIsochrones);
    return envelope;
}
Use of org.opentripplanner.analyst.cluster.ResultEnvelope in project OpenTripPlanner by opentripplanner.
The class RepeatedRaptorProfileRouter, method route.
public ResultEnvelope route() {
    // When no sample set is provided, we're making isochrones.
    boolean isochrone = (sampleSet == null);
    // Does the search involve transit at all?
    boolean transit = (request.transitModes != null && request.transitModes.isTransit());
    long computationStartTime = System.currentTimeMillis();
    LOG.info("Begin profile request");
    // We only create data tables if transit is in use, otherwise they wouldn't serve any purpose.
    if (raptorWorkerData == null && transit) {
        long dataStart = System.currentTimeMillis();
        raptorWorkerData = getRaptorWorkerData(request, graph, sampleSet, ts);
        ts.raptorData = (int) (System.currentTimeMillis() - dataStart);
    }
    // Find the transit stops that are accessible from the origin, leaving behind an SPT of access
    // times to all reachable vertices.
    long initialStopStartTime = System.currentTimeMillis();
    // This will return null if we have no transit data, but will leave behind a pre-transit SPT.
    TIntIntMap transitStopAccessTimes = findInitialStops(false, raptorWorkerData);
    // Create an array containing the best travel time in seconds to each vertex in the graph when not using transit.
    int[] nonTransitTimes = new int[Vertex.getMaxIndex()];
    Arrays.fill(nonTransitTimes, Integer.MAX_VALUE);
    for (State state : preTransitSpt.getAllStates()) {
        // Note that we are using the walk distance divided by speed here in order to be consistent with the
        // least-walk optimization in the initial stop search (and the stop tree cache which is used at egress)
        // TODO consider why this matters, I'm using reported travel time from the states
        int time = (int) state.getElapsedTimeSeconds();
        int vidx = state.getVertex().getIndex();
        int otime = nonTransitTimes[vidx];
        // There may be dominated states in the SPT. Make sure we don't include them here.
        if (otime > time) {
            nonTransitTimes[vidx] = time;
        }
    }
    ts.initialStopSearch = (int) (System.currentTimeMillis() - initialStopStartTime);
    // FIXME wasn't the walk search already performed above?
    long walkSearchStart = System.currentTimeMillis();
    // If a sample set was provided, the caller wants travel times to the points in that sample set rather than
    // to all vertices in the graph. Therefore we must replace the vertex-indexed array with a new point-indexed array.
    if (sampleSet != null) {
        nonTransitTimes = sampleSet.eval(nonTransitTimes);
    }
    ts.walkSearch = (int) (System.currentTimeMillis() - walkSearchStart);
    if (transit) {
        RaptorWorker worker = new RaptorWorker(raptorWorkerData, request);
        propagatedTimesStore = worker.runRaptor(graph, transitStopAccessTimes, nonTransitTimes, ts);
        ts.initialStopCount = transitStopAccessTimes.size();
    } else {
        // Nontransit case: skip transit routing and make a propagated times store based on only one row.
        propagatedTimesStore = new PropagatedTimesStore(graph, request, nonTransitTimes.length);
        int[][] singleRoundResults = new int[1][];
        singleRoundResults[0] = nonTransitTimes;
        propagatedTimesStore.setFromArray(singleRoundResults, new boolean[] { true }, PropagatedTimesStore.ConfidenceCalculationMethod.MIN_MAX);
    }
    for (int min : propagatedTimesStore.mins) {
        if (min != RaptorWorker.UNREACHED)
            ts.targetsReached++;
    }
    ts.compute = (int) (System.currentTimeMillis() - computationStartTime);
    LOG.info("Profile request finished in {} seconds", (ts.compute) / 1000.0);
    // Turn the results of the search into isochrone geometries or accessibility data as requested.
    long resultSetStart = System.currentTimeMillis();
    ResultEnvelope envelope = new ResultEnvelope();
    if (isochrone) {
        // No destination point set was provided and we're just making isochrones based on travel time to vertices,
        // rather than finding access times to a set of user-specified points.
        envelope = propagatedTimesStore.makeIsochronesForVertices();
    } else {
        // A destination point set was provided. We've found access times to a set of specified points.
        // TODO actually use those boolean params to calculate isochrones on a regular grid pointset
        // TODO maybe there's a better way to pass includeTimes in here from the clusterRequest,
        // maybe we should just provide the whole clusterRequest not just the wrapped profileRequest.
        envelope = propagatedTimesStore.makeResults(sampleSet, includeTimes, true, false);
    }
    ts.resultSets = (int) (System.currentTimeMillis() - resultSetStart);
    return envelope;
}
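Putting the pieces together, a caller drives the router much as RepeatedRaptorComparison does above: build an analyst ProfileRequest, construct a RepeatedRaptorProfileRouter for a graph and (optionally) a SampleSet, and call route(). A minimal sketch under the same assumptions as the comparison tool; the coordinates, date and time window are illustrative values, error handling is omitted, and ResultSet.times is assumed to be the int array read above:

// Sketch: route a single origin with the same request fields the comparison tool sets above.
ProfileRequest req = new ProfileRequest();
req.analyst = true;
req.accessModes = new QualifiedModeSet("WALK");
req.transitModes = new TraverseModeSet("TRANSIT");
req.walkSpeed = 1.3f;
req.maxWalkTime = 20 * 60;
req.fromTime = 7 * 3600;   // 7 AM
req.toTime = 9 * 3600;     // 9 AM
req.date = new LocalDate(2015, 8, 4);
req.fromLat = req.toLat = 45.5231;    // illustrative origin
req.fromLon = req.toLon = -122.6765;

// Passing a SampleSet yields accessibility results; passing null would yield isochrones instead.
RepeatedRaptorProfileRouter router = new RepeatedRaptorProfileRouter(graph, req, sampleSet);
router.includeTimes = true;
ResultEnvelope envelope = router.route();
int[] averageTimes = envelope.avgCase.times;  // seconds to each point in the sample set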
Use of org.opentripplanner.analyst.cluster.ResultEnvelope in project OpenTripPlanner by opentripplanner.
The class TNPropagatedTimesStore, method makeResults.
/**
 * Make a ResultEnvelope directly from a given PointSet.
 * The RaptorWorkerData must have been constructed from the same SampleSet.
 * This is how the accumulated results are returned back out to the PropagatedTimesStore's creator.
 */
public ResultEnvelope makeResults(PointSet pointSet, boolean includeTimes, boolean includeHistograms, boolean includeIsochrones) {
    ResultEnvelope envelope = new ResultEnvelope();
    envelope.worstCase = new ResultSet(maxs, pointSet, includeTimes, includeHistograms, includeIsochrones);
    envelope.avgCase = new ResultSet(avgs, pointSet, includeTimes, includeHistograms, includeIsochrones);
    envelope.bestCase = new ResultSet(mins, pointSet, includeTimes, includeHistograms, includeIsochrones);
    return envelope;
}