Use of io.druid.query.Query in project druid by druid-io.
In class DirectDruidClient, method run:
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
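// "bySegment" queries return results wrapped per segment, so they deserialize into a different Jackson target type (the bySegmentType built below).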
boolean isBySegment = BaseQuery.getContextBySegment(query, false);
Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
if (types == null) {
final TypeFactory typeFactory = objectMapper.getTypeFactory();
JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
JavaType bySegmentType = typeFactory.constructParametricType(Result.class, typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType));
types = Pair.of(baseType, bySegmentType);
typesMap.put(query.getClass(), types);
}
final JavaType typeRef;
if (isBySegment) {
typeRef = types.rhs;
} else {
typeRef = types.lhs;
}
final ListenableFuture<InputStream> future;
final String url = String.format("http://%s/druid/v2/", host);
final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());
try {
log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
final long requestStartTime = System.currentTimeMillis();
final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
builder.setDimension("server", host);
final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
private long responseStartTime;
private final AtomicLong byteCount = new AtomicLong(0);
private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
private final AtomicBoolean done = new AtomicBoolean(false);
@Override
public ClientResponse<InputStream> handleResponse(HttpResponse response) {
log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
responseStartTime = System.currentTimeMillis();
emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));
try {
final String responseContext = response.headers().get("X-Druid-Response-Context");
// context may be null in case of error or query timeout
if (responseContext != null) {
context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext, new TypeReference<Map<String, Object>>() {
}));
}
queue.put(new ChannelBufferInputStream(response.getContent()));
} catch (final IOException e) {
log.error(e, "Error parsing response context from url [%s]", url);
return ClientResponse.<InputStream>finished(new InputStream() {
@Override
public int read() throws IOException {
throw e;
}
});
} catch (InterruptedException e) {
log.error(e, "Queue appending interrupted");
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
byteCount.addAndGet(response.getContent().readableBytes());
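// Results stream back incrementally: handleChunk() keeps appending chunks to the queue, and the SequenceInputStream below concatenates them in arrival order.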
return ClientResponse.<InputStream>finished(new SequenceInputStream(new Enumeration<InputStream>() {
@Override
public boolean hasMoreElements() {
// The stream is exhausted only after done has been set and the queue has been drained.
synchronized (done) {
return !done.get() || !queue.isEmpty();
}
}
@Override
public InputStream nextElement() {
try {
return queue.take();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
}
}));
}
@Override
public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk) {
final ChannelBuffer channelBuffer = chunk.getContent();
final int bytes = channelBuffer.readableBytes();
if (bytes > 0) {
try {
queue.put(new ChannelBufferInputStream(channelBuffer));
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
byteCount.addAndGet(bytes);
}
return clientResponse;
}
@Override
public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
long stopTime = System.currentTimeMillis();
log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, byteCount.get(), stopTime - responseStartTime, byteCount.get() / (0.0001 * (stopTime - responseStartTime)));
emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
emitter.emit(builder.build("query/node/bytes", byteCount.get()));
synchronized (done) {
try {
// An empty stream is put at the end to give SequenceInputStream.close() something to close out
// after done is set to true, regardless of the rest of the stream's state.
queue.put(ByteSource.empty().openStream());
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
} catch (IOException e) {
// This should never happen
throw Throwables.propagate(e);
} finally {
done.set(true);
}
}
return ClientResponse.<InputStream>finished(clientResponse.getObj());
}
@Override
public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
// Don't wait for lock in case the lock had something to do with the error
synchronized (done) {
done.set(true);
// Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
// If nothing is waiting on take(), this will be closed out anyways.
queue.offer(new InputStream() {
@Override
public int read() throws IOException {
throw new IOException(e);
}
});
}
}
};
future = httpClient.go(
    new Request(HttpMethod.POST, new URL(url))
        .setContent(objectMapper.writeValueAsBytes(query))
        .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
    responseHandler
);
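// Registering with the QueryWatcher lets Druid cancel or time out this query by cancelling the future.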
queryWatcher.registerQuery(query, future);
openConnections.getAndIncrement();
Futures.addCallback(future, new FutureCallback<InputStream>() {
@Override
public void onSuccess(InputStream result) {
openConnections.getAndDecrement();
}
@Override
public void onFailure(Throwable t) {
openConnections.getAndDecrement();
if (future.isCancelled()) {
// forward the cancellation to underlying queriable node
try {
StatusResponseHolder res = httpClient.go(
    new Request(HttpMethod.DELETE, new URL(cancelUrl))
        .setContent(objectMapper.writeValueAsBytes(query))
        .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
    new StatusResponseHandler(Charsets.UTF_8)
).get();
if (res.getStatus().getCode() >= 500) {
throw new RE("Error cancelling query[%s]: queriable node returned status[%d] [%s].", query.getId(), res.getStatus().getCode(), res.getStatus().getReasonPhrase());
}
} catch (IOException | ExecutionException | InterruptedException e) {
Throwables.propagate(e);
}
}
}
});
} catch (IOException e) {
throw Throwables.propagate(e);
}
Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
@Override
public JsonParserIterator<T> make() {
return new JsonParserIterator<T>(typeRef, future, url);
}
@Override
public void cleanup(JsonParserIterator<T> iterFromMake) {
CloseQuietly.close(iterFromMake);
}
});
// avoid the cost of de-serializing and then re-serializing again when adding to cache
if (!isBySegment) {
retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
}
return retVal;
}
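The handleResponse callback above merges the queried node's X-Druid-Response-Context header into the caller-supplied context map. A minimal, self-contained sketch of that Jackson deserialization step (the header value below is made up for illustration; real values come from the queried node) might look like:

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;

public class ResponseContextHeaderExample {
  public static void main(String[] args) throws Exception {
    final ObjectMapper objectMapper = new ObjectMapper();
    final Map<String, Object> context = new HashMap<>();
    // Hypothetical header value, for illustration only.
    final String responseContext = "{\"missingSegments\":[]}";
    if (responseContext != null) {
      // Merge the server-provided entries into the shared query context, as the client does above.
      context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext, new TypeReference<Map<String, Object>>() {
      }));
    }
    System.out.println(context);
  }
}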
Use of io.druid.query.Query in project druid by druid-io.
In class RealtimeManager, method getQueryRunnerForSegments:
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs) {
final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
final Map<Integer, FireChief> partitionChiefs = chiefs.get(Iterables.getOnlyElement(query.getDataSource().getNames()));
return partitionChiefs == null ? new NoopQueryRunner<T>() : factory.getToolchest().mergeResults(factory.mergeRunners(MoreExecutors.sameThreadExecutor(), Iterables.transform(specs, new Function<SegmentDescriptor, QueryRunner<T>>() {
@Override
public QueryRunner<T> apply(SegmentDescriptor spec) {
final FireChief retVal = partitionChiefs.get(spec.getPartitionNumber());
return retVal == null ? new NoopQueryRunner<T>() : retVal.getQueryRunner(query.withQuerySegmentSpec(new SpecificSegmentSpec(spec)));
}
})));
}
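RealtimeManager resolves each SegmentDescriptor to the FireChief serving that partition and substitutes a NoopQueryRunner when no chief exists. A minimal sketch of that lookup-with-fallback pattern, using hypothetical stand-in types rather than Druid's QueryRunner and FireChief, might look like:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PartitionLookupExample {
  // Hypothetical stand-in for a per-partition query runner.
  interface Runner {
    String run();
  }

  static final Runner NOOP = () -> "no results";

  // Missing partitions fall back to a no-op runner, mirroring the NoopQueryRunner branch above.
  static List<Runner> runnersFor(List<Integer> partitions, Map<Integer, Runner> chiefs) {
    final List<Runner> runners = new ArrayList<>();
    for (Integer partition : partitions) {
      final Runner chief = chiefs.get(partition);
      runners.add(chief == null ? NOOP : chief);
    }
    return runners;
  }

  public static void main(String[] args) {
    final Map<Integer, Runner> chiefs = new HashMap<>();
    chiefs.put(0, () -> "partition 0 results");
    for (Runner runner : runnersFor(Arrays.asList(0, 1), chiefs)) {
      System.out.println(runner.run());
    }
  }
}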
Use of io.druid.query.Query in project druid by druid-io.
In class ServerManager, method getQueryRunnerForSegments:
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs) {
final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
if (factory == null) {
log.makeAlert("Unknown query type, [%s]", query.getClass()).addData("dataSource", query.getDataSource()).emit();
return new NoopQueryRunner<T>();
}
final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
String dataSourceName = getDataSourceName(query.getDataSource());
final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = dataSources.get(dataSourceName);
if (timeline == null) {
return new NoopQueryRunner<T>();
}
final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = getBuilderFn(toolChest);
final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
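// Accumulates CPU time spent inside the per-segment runners; CPUTimeMetricQueryRunner.safeBuild(...) below reports the total.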
FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable.create(specs).transformCat(new Function<SegmentDescriptor, Iterable<QueryRunner<T>>>() {
@Override
@SuppressWarnings("unchecked")
public Iterable<QueryRunner<T>> apply(SegmentDescriptor input) {
final PartitionHolder<ReferenceCountingSegment> entry = timeline.findEntry(input.getInterval(), input.getVersion());
if (entry == null) {
return Arrays.<QueryRunner<T>>asList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
}
final PartitionChunk<ReferenceCountingSegment> chunk = entry.getChunk(input.getPartitionNumber());
if (chunk == null) {
return Arrays.<QueryRunner<T>>asList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
}
final ReferenceCountingSegment adapter = chunk.getObject();
return Arrays.asList(buildAndDecorateQueryRunner(factory, toolChest, adapter, input, builderFn, cpuTimeAccumulator));
}
});
return CPUTimeMetricQueryRunner.safeBuild(new FinalizeResultsQueryRunner<>(toolChest.mergeResults(factory.mergeRunners(exec, queryRunners)), toolChest), builderFn, emitter, cpuTimeAccumulator, true);
}
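The cpuTimeAccumulator above gathers CPU time spent by the per-segment runners so that a single total can be reported for the query. A minimal sketch of that kind of per-thread CPU accounting, using the standard ThreadMXBean API rather than Druid's internal utilities, might look like:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.atomic.AtomicLong;

public class CpuTimeAccountingExample {
  public static void main(String[] args) {
    final ThreadMXBean mxBean = ManagementFactory.getThreadMXBean();
    final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);

    if (mxBean.isCurrentThreadCpuTimeSupported()) {
      final long start = mxBean.getCurrentThreadCpuTime(); // nanoseconds of CPU time used by this thread
      // ... work to attribute, e.g. running one per-segment query ...
      final long delta = mxBean.getCurrentThreadCpuTime() - start;
      cpuTimeAccumulator.addAndGet(delta);
    }

    System.out.printf("accumulated CPU time: %,d ns%n", cpuTimeAccumulator.get());
  }
}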
Use of io.druid.query.Query in project druid by druid-io.
In class LoggingRequestLogger, method log:
@Override
public void log(RequestLogLine requestLogLine) throws IOException {
final Map mdc = MDC.getCopyOfContextMap();
// MDC must be set during the `LOG.info` call at the end of the try block.
try {
if (setMDC) {
try {
final Query query = requestLogLine.getQuery();
MDC.put("queryId", query.getId());
MDC.put("dataSource", query.getDataSource().toString());
MDC.put("queryType", query.getType());
MDC.put("hasFilters", Boolean.toString(query.hasFilters()));
MDC.put("remoteAddr", requestLogLine.getRemoteAddr());
MDC.put("duration", query.getDuration().toString());
MDC.put("descending", Boolean.toString(query.isDescending()));
if (setContextMDC) {
final Iterable<Map.Entry<String, Object>> entries = query.getContext() == null ? ImmutableList.<Map.Entry<String, Object>>of() : query.getContext().entrySet();
for (Map.Entry<String, Object> entry : entries) {
MDC.put(entry.getKey(), entry.getValue() == null ? "NULL" : entry.getValue().toString());
}
}
} catch (RuntimeException re) {
LOG.error(re, "Error preparing MDC");
}
}
final String line = requestLogLine.getLine(mapper);
// MDC must be set here
LOG.info("%s", line);
} finally {
if (setMDC) {
if (mdc != null) {
MDC.setContextMap(mdc);
} else {
MDC.clear();
}
}
}
}
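The logger above snapshots the caller's MDC, installs query-specific entries, logs the line, and restores the snapshot in a finally block. A minimal sketch of that save/restore pattern with the SLF4J MDC API (the keys and values below are made up) might look like:

import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class MdcSaveRestoreExample {
  private static final Logger LOG = LoggerFactory.getLogger(MdcSaveRestoreExample.class);

  public static void main(String[] args) {
    // Snapshot whatever MDC state the caller had; may be null if nothing was set.
    final Map<String, String> saved = MDC.getCopyOfContextMap();
    try {
      MDC.put("queryId", "example-query-id"); // hypothetical values, for illustration only
      MDC.put("dataSource", "example_datasource");
      LOG.info("log line carrying the MDC entries above");
    } finally {
      // Restore the caller's MDC exactly as LoggingRequestLogger does.
      if (saved != null) {
        MDC.setContextMap(saved);
      } else {
        MDC.clear();
      }
    }
  }
}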
Use of io.druid.query.Query in project druid by druid-io.
In class YeOldePlumberSchool, method findPlumber:
@Override
public Plumber findPlumber(final DataSchema schema, final RealtimeTuningConfig config, final FireDepartmentMetrics metrics) {
// There can be only one.
final Sink theSink = new Sink(interval, schema, config.getShardSpec(), version, config.getMaxRowsInMemory(), config.isReportParseExceptions());
// Temporary directory to hold spilled segments.
final File persistDir = new File(tmpSegmentDir, theSink.getSegment().getIdentifier());
// Set of spilled segments. Will be merged at the end.
final Set<File> spilled = Sets.newHashSet();
// IndexMerger implementation.
final IndexMerger theIndexMerger = config.getBuildV9Directly() ? indexMergerV9 : indexMerger;
return new Plumber() {
@Override
public Object startJob() {
return null;
}
@Override
public int add(InputRow row, Supplier<Committer> committerSupplier) throws IndexSizeExceededException {
Sink sink = getSink(row.getTimestampFromEpoch());
if (sink == null) {
return -1;
}
final int numRows = sink.add(row);
if (!sink.canAppendRow()) {
persist(committerSupplier.get());
}
return numRows;
}
private Sink getSink(long timestamp) {
if (theSink.getInterval().contains(timestamp)) {
return theSink;
} else {
return null;
}
}
@Override
public <T> QueryRunner<T> getQueryRunner(Query<T> query) {
throw new UnsupportedOperationException("Don't query me, bro.");
}
@Override
public void persist(Committer committer) {
spillIfSwappable();
committer.run();
}
@Override
public void finishJob() {
// The segment we will upload
File fileToUpload = null;
try {
// User should have persisted everything by now.
Preconditions.checkState(!theSink.swappable(), "All data must be persisted before finishing the job!");
if (spilled.size() == 0) {
throw new IllegalStateException("Nothing indexed?");
} else if (spilled.size() == 1) {
fileToUpload = Iterables.getOnlyElement(spilled);
} else {
List<QueryableIndex> indexes = Lists.newArrayList();
for (final File oneSpill : spilled) {
indexes.add(indexIO.loadIndex(oneSpill));
}
fileToUpload = new File(tmpSegmentDir, "merged");
theIndexMerger.mergeQueryableIndex(indexes, schema.getGranularitySpec().isRollup(), schema.getAggregators(), fileToUpload, config.getIndexSpec());
}
// Map merged segment so we can extract dimensions
final QueryableIndex mappedSegment = indexIO.loadIndex(fileToUpload);
final DataSegment segmentToUpload = theSink.getSegment().withDimensions(ImmutableList.copyOf(mappedSegment.getAvailableDimensions())).withBinaryVersion(SegmentUtils.getVersionFromDir(fileToUpload));
dataSegmentPusher.push(fileToUpload, segmentToUpload);
log.info("Uploaded segment[%s]", segmentToUpload.getIdentifier());
} catch (Exception e) {
log.warn(e, "Failed to merge and upload");
throw Throwables.propagate(e);
} finally {
try {
if (fileToUpload != null) {
log.info("Deleting Index File[%s]", fileToUpload);
FileUtils.deleteDirectory(fileToUpload);
}
} catch (IOException e) {
log.warn(e, "Error deleting directory[%s]", fileToUpload);
}
}
}
private void spillIfSwappable() {
if (theSink.swappable()) {
final FireHydrant indexToPersist = theSink.swap();
final int rowsToPersist = indexToPersist.getIndex().size();
final File dirToPersist = getSpillDir(indexToPersist.getCount());
log.info("Spilling index[%d] with rows[%d] to: %s", indexToPersist.getCount(), rowsToPersist, dirToPersist);
try {
theIndexMerger.persist(indexToPersist.getIndex(), dirToPersist, config.getIndexSpec());
indexToPersist.swapSegment(null);
metrics.incrementRowOutputCount(rowsToPersist);
spilled.add(dirToPersist);
} catch (Exception e) {
log.warn(e, "Failed to spill index[%d]", indexToPersist.getCount());
throw Throwables.propagate(e);
}
}
}
private File getSpillDir(final int n) {
return new File(persistDir, String.format("spill%d", n));
}
};
}
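In finishJob above, zero spills is an error, a single spill is uploaded as-is, and several spills are merged first. A minimal sketch of that selection logic, with the merge step stubbed out because the real one needs Druid's IndexMerger and the paths purely hypothetical, might look like:

import java.io.File;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SpillSelectionExample {
  // Hypothetical stub standing in for IndexMerger.mergeQueryableIndex(...); not Druid's API.
  static File mergeSpills(Set<File> spills, File mergedDir) {
    return mergedDir;
  }

  static File chooseFileToUpload(Set<File> spilled, File tmpSegmentDir) {
    if (spilled.isEmpty()) {
      throw new IllegalStateException("Nothing indexed?");
    } else if (spilled.size() == 1) {
      // A single spill can be pushed as-is.
      return spilled.iterator().next();
    } else {
      // Several spills are merged into one directory before pushing.
      return mergeSpills(spilled, new File(tmpSegmentDir, "merged"));
    }
  }

  public static void main(String[] args) {
    final Set<File> spilled = new HashSet<>(Arrays.asList(new File("/tmp/spill0"), new File("/tmp/spill1")));
    System.out.println(chooseFileToUpload(spilled, new File("/tmp/segment")));
  }
}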