Usage example of io.georocket.output.Merger from the georocket project:
class StoreEndpoint, method doMerge.
/**
 * Perform a search and merge all retrieved chunks using the given merger
 * @param merger the merger
 * @param data Data to merge into the response
 * @param out the response to write the merged chunks to
 * @return a single that will emit one item when all chunks have been merged
 */
private Single<Void> doMerge(Merger<ChunkMeta> merger, Single<StoreCursor> data, WriteStream<Buffer> out) {
  return data
    .map(RxStoreCursor::new)
    .flatMapObservable(RxStoreCursor::toObservable)
    // fetch and merge chunks one at a time (maxConcurrent = 1) to keep
    // the output stream ordered
    .flatMap(metaAndPath -> store.rxGetOne(metaAndPath.getRight())
      .flatMapObservable(chunk -> merger.merge(chunk, metaAndPath.getLeft(), out)
        // left: count, right: not_accepted
        .map(unused -> Pair.of(1L, 0L))
        .onErrorResumeNext(err -> {
          if (err instanceof IllegalStateException) {
            // ignore it, but emit a warning later
            return Observable.just(Pair.of(0L, 1L));
          }
          return Observable.error(err);
        })
        .doOnTerminate(() -> {
          // don't forget to close the chunk!
          chunk.close();
        })), 1)
    .defaultIfEmpty(Pair.of(0L, 0L))
    // sum up merged and rejected chunk counts
    .reduce((a, b) -> Pair.of(a.getLeft() + b.getLeft(), a.getRight() + b.getRight()))
    .flatMap(totals -> {
      long merged = totals.getLeft();
      long notAccepted = totals.getRight();
      if (notAccepted > 0) {
        log.warn("Could not merge " + notAccepted + " chunks " + "because the merger did not accept them. Most likely " + "these are new chunks that were added while the " + "merge was in progress. If this worries you, just " + "repeat the request.");
      }
      if (merged > 0) {
        // at least one chunk was written - finalize the merged output
        merger.finish(out);
        return Observable.just(null);
      }
      // nothing matched the query
      return Observable.error(new FileNotFoundException("Not Found"));
    })
    .toSingle()
    .map(ignored -> null);
}
Usage example of io.georocket.output.Merger from the georocket project:
class StoreEndpoint, method getChunks.
/**
 * Retrieve all chunks matching the specified query and path
 * @param context the routing context
 */
private void getChunks(RoutingContext context) {
  HttpServerResponse response = context.response();
  Single<StoreCursor> cursor = prepareCursor(context);

  // Our responses must always be chunked because we cannot calculate
  // the exact content-length beforehand. We perform two searches, one to
  // initialize the merger and one to do the actual merge. The problem is
  // that the result set may change between these two searches and so we
  // cannot calculate the content-length just from looking at the result
  // from the first search.
  response.setChunked(true);

  // perform two searches: first initialize the merger and then
  // merge all retrieved chunks
  Merger<ChunkMeta> merger = createMerger(context);
  initializeMerger(merger, cursor)
    .flatMapSingle(initialized -> doMerge(merger, cursor, response))
    .subscribe(
      done -> response.end(),
      err -> {
        // "Not Found" is an expected outcome (empty result set) - don't log it
        if (!(err instanceof FileNotFoundException)) {
          log.error("Could not perform query", err);
        }
        fail(response, err);
      });
}
Aggregations