Use of org.opentripplanner.analyst.SampleSet in project OpenTripPlanner by opentripplanner:
the class SurfaceResource, method getIndicator.
/**
 * Evaluate a surface at all the points in a PointSet.
 * This sends back a ResultSet serialized as JSON.
 * Normally we return histograms with the number of points reached (in field 'counts') and the number of
 * opportunities reached (i.e. the sum of the magnitudes of all points reached) in each one-minute bin of travel
 * time.
 *
 * @param surfaceId        ID of a previously computed TimeSurface held in the server's surface cache
 * @param targetPointSetId ID of the PointSet whose points are evaluated against the surface
 * @param originPointSetId accepted for API compatibility; not read by this method — TODO confirm intent
 * @param detail if true, include the travel time to every point in the pointset (which is in fact an ordered list)
 */
@GET
@Path("/{surfaceId}/indicator")
public Response getIndicator(@PathParam("surfaceId") Integer surfaceId,
                             @QueryParam("targets") String targetPointSetId,
                             @QueryParam("origins") String originPointSetId,
                             @QueryParam("detail") boolean detail) {
    final TimeSurface surf = otpServer.surfaceCache.get(surfaceId);
    if (surf == null) {
        return badRequest("Invalid TimeSurface ID.");
    }
    final PointSet pset = otpServer.pointSetCache.get(targetPointSetId);
    if (pset == null) {
        return badRequest("Missing or invalid target PointSet ID.");
    }
    Router router = otpServer.getRouter(surf.routerId);
    // TODO cache this sampleset
    SampleSet samples = pset.getSampleSet(router.graph);
    // Note: the former `if (indicator == null) return badServer(...)` check was dead code —
    // a constructor invocation can never yield null; a construction failure surfaces as an exception.
    final ResultSet indicator = new ResultSet(samples, surf, detail, detail);
    // Stream the JSON directly to the client rather than buffering the whole response in memory.
    return Response.ok().entity(new StreamingOutput() {
        @Override
        public void write(OutputStream output) throws IOException, WebApplicationException {
            indicator.writeJson(output);
        }
    }).build();
}
Use of org.opentripplanner.analyst.SampleSet in project OpenTripPlanner by opentripplanner:
the class AnalystWorker, method handleOneRequest.
/**
 * This is the callback that processes a single task and returns the results upon completion.
 * It may be called several times simultaneously on different executor threads.
 *
 * @param clusterRequest the task to process; its fields select between isochrone
 *                       (no destination pointset) vs. accessibility requests, and single-point
 *                       priority (no output location) vs. batch-to-S3 requests
 */
private void handleOneRequest(AnalystClusterRequest clusterRequest) {
    // Dry-run mode: exercise the task-handling pipeline without doing any routing work,
    if (dryRunFailureRate >= 0) {
        // but will fail a certain percentage of the time.
        if (random.nextInt(100) >= dryRunFailureRate) {
            // Pretend to succeed.
            deleteRequest(clusterRequest);
        } else {
            LOG.info("Intentionally failing on task {}", clusterRequest.taskId);
        }
        return;
    }
    try {
        long startTime = System.currentTimeMillis();
        LOG.info("Handling message {}", clusterRequest.toString());
        // We need to distinguish between and handle four different types of requests here:
        // Either vector isochrones or accessibility to a pointset,
        // as either a single-origin priority request (where the result is returned immediately)
        // or a job task (where the result is saved to output location on S3).
        boolean isochrone = (clusterRequest.destinationPointsetId == null);
        boolean singlePoint = (clusterRequest.outputLocation == null);
        boolean transit = (clusterRequest.profileRequest.transitModes != null && clusterRequest.profileRequest.transitModes.isTransit());
        if (singlePoint) {
            // Record when interactive work was last seen and make sure the high-priority
            // side channel to the broker is open so results can be returned immediately.
            lastHighPriorityRequestProcessed = startTime;
            if (!sideChannelOpen) {
                openSideChannel();
            }
        }
        // Accumulate timing and metadata about this task for later usage/efficiency analysis.
        TaskStatistics ts = new TaskStatistics();
        ts.pointsetId = clusterRequest.destinationPointsetId;
        ts.graphId = clusterRequest.graphId;
        ts.awsInstanceType = instanceType;
        ts.jobId = clusterRequest.jobId;
        ts.workerId = machineId;
        ts.single = singlePoint;
        // Get the graph object for the ID given in the request, fetching inputs and building as needed.
        // All requests handled together are for the same graph, and this call is synchronized so the graph will
        // only be built once.
        long graphStartTime = System.currentTimeMillis();
        Graph graph = clusterGraphBuilder.getGraph(clusterRequest.graphId);
        // Record graphId so we "stick" to this same graph on subsequent polls
        graphId = clusterRequest.graphId;
        ts.graphBuild = (int) (System.currentTimeMillis() - graphStartTime);
        ts.graphTripCount = graph.index.patternForTrip.size();
        ts.graphStopCount = graph.index.stopForId.size();
        ts.lon = clusterRequest.profileRequest.fromLon;
        ts.lat = clusterRequest.profileRequest.fromLat;
        final SampleSet sampleSet;
        // fetch the set of points we will use as destinations.
        if (isochrone) {
            // This is an isochrone request, tell the RepeatedRaptorProfileRouter there are no targets.
            sampleSet = null;
        } else {
            // This is not an isochrone request. There is necessarily a destination point set supplied.
            PointSet pointSet = pointSetDatastore.get(clusterRequest.destinationPointsetId);
            // TODO this breaks if graph has been rebuilt
            sampleSet = pointSet.getOrCreateSampleSet(graph);
        }
        // Note that all parameters to create the Raptor worker data are passed in the constructor except ts.
        // Why not pass in ts as well since this is a throwaway calculator?
        RepeatedRaptorProfileRouter router = new RepeatedRaptorProfileRouter(graph, clusterRequest.profileRequest, sampleSet);
        router.ts = ts;
        // But then we'd need to pass in both the cache and the key, which is weird.
        if (transit && !singlePoint) {
            // Batch transit job: build the Raptor worker data at most once per jobId and share it
            // across all tasks of that job via the cache.
            long dataStart = System.currentTimeMillis();
            router.raptorWorkerData = workerDataCache.get(clusterRequest.jobId, () -> RepeatedRaptorProfileRouter.getRaptorWorkerData(clusterRequest.profileRequest, graph, sampleSet, ts));
            ts.raptorData = (int) (System.currentTimeMillis() - dataStart);
        } else {
            // The worker will generate a one-time throw-away table.
            router.raptorWorkerData = null;
        }
        // Run the core repeated-raptor analysis.
        // This result envelope will contain the results of the one-to-many profile or single-departure-time search.
        ResultEnvelope envelope = new ResultEnvelope();
        try {
            // TODO when router runs, if there are no transit modes defined it should just skip the transit work.
            router.includeTimes = clusterRequest.includeTimes;
            envelope = router.route();
            envelope.id = clusterRequest.id;
            ts.success = true;
        } catch (Exception ex) {
            // An error occurred. Leave the envelope empty and TODO include error information.
            LOG.error("Error occurred in profile request", ex);
            ts.success = false;
        }
        // Send the ResultEnvelope back to the user.
        // The results are either stored on S3 (for multi-origin jobs) or sent back through the broker (for
        // immediate interactive display of isochrones).
        envelope.id = clusterRequest.id;
        envelope.jobId = clusterRequest.jobId;
        envelope.destinationPointsetId = clusterRequest.destinationPointsetId;
        if (clusterRequest.outputLocation != null) {
            // Convert the result envelope and its contents to JSON and gzip it in this thread.
            // Transfer the results to Amazon S3 in another thread, piping between the two.
            String s3key = String.join("/", clusterRequest.jobId, clusterRequest.id + ".json.gz");
            PipedInputStream inPipe = new PipedInputStream();
            PipedOutputStream outPipe = new PipedOutputStream(inPipe);
            new Thread(() -> {
                s3.putObject(clusterRequest.outputLocation, s3key, inPipe, null);
            }).start();
            OutputStream gzipOutputStream = new GZIPOutputStream(outPipe);
            // NOTE(review): gzipOutputStream is not in try-with-resources — if writeValue throws,
            // the pipe is never closed and the S3 uploader thread may block indefinitely on read.
            // We could do the writeValue() in a thread instead, in which case both the DELETE and S3 options
            // could consume it in the same way.
            objectMapper.writeValue(gzipOutputStream, envelope);
            gzipOutputStream.close();
            // Tell the broker the task has been handled and should not be re-delivered to another worker.
            deleteRequest(clusterRequest);
        } else {
            // No output location was provided. Instead of saving the result on S3,
            // return the result immediately via a connection held open by the broker and mark the task completed.
            finishPriorityTask(clusterRequest, envelope);
        }
        // Record information about the current task so we can analyze usage and efficiency over time.
        ts.total = (int) (System.currentTimeMillis() - startTime);
        statsStore.store(ts);
    } catch (Exception ex) {
        // Catch-all for unexpected failures. The task is neither deleted nor finished here,
        // so presumably the broker re-delivers it after a timeout — TODO confirm re-delivery semantics.
        LOG.error("An error occurred while routing", ex);
    }
}
Aggregations