Use of org.apache.druid.query.QueryInterruptedException in project druid by druid-io.
The class AsyncQueryForwardingServlet, method handleQueryParseException.
@VisibleForTesting
void handleQueryParseException(
    HttpServletRequest request, HttpServletResponse response, ObjectMapper objectMapper,
    IOException parseException, boolean isNativeQuery
) throws IOException {
  QueryInterruptedException exceptionToReport = QueryInterruptedException.wrapIfNeeded(parseException);
  LOG.warn(exceptionToReport, "Exception parsing query");

  // Log the error message
  final String errorMessage =
      exceptionToReport.getMessage() == null ? "no error message" : exceptionToReport.getMessage();
  if (isNativeQuery) {
    requestLogger.logNativeQuery(RequestLogLine.forNative(
        null, DateTimes.nowUtc(), request.getRemoteAddr(),
        new QueryStats(ImmutableMap.of("success", false, "exception", errorMessage))
    ));
  } else {
    requestLogger.logSqlQuery(RequestLogLine.forSql(
        null, null, DateTimes.nowUtc(), request.getRemoteAddr(),
        new QueryStats(ImmutableMap.of("success", false, "exception", errorMessage))
    ));
  }

  // Write to the response
  response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
  response.setContentType(MediaType.APPLICATION_JSON);
  objectMapper.writeValue(
      response.getOutputStream(),
      serverConfig.getErrorResponseTransformStrategy().transformIfNeeded(exceptionToReport)
  );
}
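The flow above (wrap the parse failure, log the request, then write a transformed error object as the HTTP 400 body) can be illustrated outside Druid with plain Jackson and the Servlet API. The ParseErrorBody and ParseErrorResponder classes below are hypothetical stand-ins for the transformed QueryInterruptedException and the tail of the servlet method, not Druid code:

import com.fasterxml.jackson.databind.ObjectMapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;

// Hypothetical error payload standing in for the transformed QueryInterruptedException.
class ParseErrorBody {
  public final String error = "Query parse failed";
  public final String errorMessage;
  ParseErrorBody(String errorMessage) { this.errorMessage = errorMessage; }
}

class ParseErrorResponder {
  private static final ObjectMapper MAPPER = new ObjectMapper();

  // Mirrors the servlet method's tail: set 400, JSON content type, write the error body.
  static void respond(HttpServletResponse response, IOException parseException) throws IOException {
    String message = parseException.getMessage() == null ? "no error message" : parseException.getMessage();
    response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
    response.setContentType("application/json");
    MAPPER.writeValue(response.getOutputStream(), new ParseErrorBody(message));
  }
}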
Use of org.apache.druid.query.QueryInterruptedException in project druid by druid-io.
The class ConcurrentGrouper, method parallelSortAndGetGroupersIterator.
private List<CloseableIterator<Entry<KeyType>>> parallelSortAndGetGroupersIterator() {
  // The number of groupers is the same as the number of processing threads in the executor
  final List<ListenableFuture<CloseableIterator<Entry<KeyType>>>> futures = groupers
      .stream()
      .map(grouper -> executor.submit(
          new AbstractPrioritizedCallable<CloseableIterator<Entry<KeyType>>>(priority) {
            @Override
            public CloseableIterator<Entry<KeyType>> call() {
              return grouper.iterator(true);
            }
          }
      ))
      .collect(Collectors.toList());

  ListenableFuture<List<CloseableIterator<Entry<KeyType>>>> future = Futures.allAsList(futures);
  try {
    final long timeout = queryTimeoutAt - System.currentTimeMillis();
    return hasQueryTimeout ? future.get(timeout, TimeUnit.MILLISECONDS) : future.get();
  } catch (InterruptedException | CancellationException e) {
    GuavaUtils.cancelAll(true, future, futures);
    throw new QueryInterruptedException(e);
  } catch (TimeoutException e) {
    GuavaUtils.cancelAll(true, future, futures);
    throw new QueryTimeoutException();
  } catch (ExecutionException e) {
    GuavaUtils.cancelAll(true, future, futures);
    throw new RuntimeException(e.getCause());
  }
}
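The structure here is a generic submit-then-wait-with-deadline pattern: each grouper is sorted on the processing pool, Futures.allAsList combines the per-grouper futures, and any interruption, timeout, or failure cancels everything before being rethrown as a query-level exception. A minimal sketch of the same pattern with plain Guava (assuming an arbitrary ListeningExecutorService and cancelling the futures by hand instead of via GuavaUtils.cancelAll) might look like:

import com.google.common.util.concurrent.*;
import java.util.List;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class ParallelWithDeadline {
  static List<Integer> runAll(ListeningExecutorService executor, long deadlineMillis) throws Exception {
    // Submit one task per worker, then wait on the combined future with whatever time is left.
    List<ListenableFuture<Integer>> futures = IntStream.range(0, 4)
        .mapToObj(i -> executor.submit(() -> i * i))
        .collect(Collectors.toList());
    ListenableFuture<List<Integer>> all = Futures.allAsList(futures);
    try {
      long timeLeft = deadlineMillis - System.currentTimeMillis();
      return all.get(timeLeft, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException | CancellationException e) {
      // On failure, cancel the combined future and every individual one, as the Druid code does via GuavaUtils.cancelAll.
      all.cancel(true);
      futures.forEach(f -> f.cancel(true));
      throw e;
    }
  }
}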
Use of org.apache.druid.query.QueryInterruptedException in project druid by druid-io.
The class GroupByMergingQueryRunnerV2, method run.
@Override
public Sequence<ResultRow> run(final QueryPlus<ResultRow> queryPlus, final ResponseContext responseContext) {
  final GroupByQuery query = (GroupByQuery) queryPlus.getQuery();
  final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);

  // CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION is here because realtime servers use nested mergeRunners calls
  // (one for the entire query and one for each sink). We only want the outer call to actually do merging with a
  // merge buffer, otherwise the query will allocate too many merge buffers. This is potentially sub-optimal as it
  // will involve materializing the results for each sink before starting to feed them into the outer merge buffer.
  // I'm not sure of a better way to do this without tweaking how realtime servers do queries.
  final boolean forceChainedExecution = query.getContextBoolean(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, false);
  final QueryPlus<ResultRow> queryPlusForRunners = queryPlus
      .withQuery(query.withOverriddenContext(ImmutableMap.of(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, true)))
      .withoutThreadUnsafeState();
  if (QueryContexts.isBySegment(query) || forceChainedExecution) {
    ChainedExecutionQueryRunner<ResultRow> runner = new ChainedExecutionQueryRunner<>(queryProcessingPool, queryWatcher, queryables);
    return runner.run(queryPlusForRunners, responseContext);
  }

  final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();
  final File temporaryStorageDirectory = new File(processingTmpDir, StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
  final int priority = QueryContexts.getPriority(query);

  // Figure out timeoutAt time now, so we can apply the timeout to both the mergeBufferPool.take and the actual
  // query processing together.
  final long queryTimeout = QueryContexts.getTimeout(query);
  final boolean hasTimeout = QueryContexts.hasTimeout(query);
  final long timeoutAt = System.currentTimeMillis() + queryTimeout;

  return new BaseSequence<>(new BaseSequence.IteratorMaker<ResultRow, CloseableGrouperIterator<RowBasedKey, ResultRow>>() {
    @Override
    public CloseableGrouperIterator<RowBasedKey, ResultRow> make() {
      final Closer resources = Closer.create();
      try {
        final LimitedTemporaryStorage temporaryStorage =
            new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
        final ReferenceCountingResourceHolder<LimitedTemporaryStorage> temporaryStorageHolder =
            ReferenceCountingResourceHolder.fromCloseable(temporaryStorage);
        resources.register(temporaryStorageHolder);

        // If parallelCombine is enabled, we need two merge buffers for parallel aggregating and parallel combining
        final int numMergeBuffers = querySpecificConfig.getNumParallelCombineThreads() > 1 ? 2 : 1;
        final List<ReferenceCountingResourceHolder<ByteBuffer>> mergeBufferHolders =
            getMergeBuffersHolder(numMergeBuffers, hasTimeout, timeoutAt);
        resources.registerAll(mergeBufferHolders);
        final ReferenceCountingResourceHolder<ByteBuffer> mergeBufferHolder = mergeBufferHolders.get(0);
        final ReferenceCountingResourceHolder<ByteBuffer> combineBufferHolder =
            numMergeBuffers == 2 ? mergeBufferHolders.get(1) : null;

        Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> pair =
            RowBasedGrouperHelper.createGrouperAccumulatorPair(
                query,
                null,
                config,
                Suppliers.ofInstance(mergeBufferHolder.get()),
                combineBufferHolder,
                concurrencyHint,
                temporaryStorage,
                spillMapper,
                queryProcessingPool, // Passed as executor service
                priority,
                hasTimeout,
                timeoutAt,
                mergeBufferSize
            );
        final Grouper<RowBasedKey> grouper = pair.lhs;
        final Accumulator<AggregateResult, ResultRow> accumulator = pair.rhs;
        grouper.init();

        final ReferenceCountingResourceHolder<Grouper<RowBasedKey>> grouperHolder =
            ReferenceCountingResourceHolder.fromCloseable(grouper);
        resources.register(grouperHolder);

        List<ListenableFuture<AggregateResult>> futures = Lists.newArrayList(Iterables.transform(
            queryables,
            new Function<QueryRunner<ResultRow>, ListenableFuture<AggregateResult>>() {
              @Override
              public ListenableFuture<AggregateResult> apply(final QueryRunner<ResultRow> input) {
                if (input == null) {
                  throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                }
                ListenableFuture<AggregateResult> future = queryProcessingPool.submitRunnerTask(
                    new AbstractPrioritizedQueryRunnerCallable<AggregateResult, ResultRow>(priority, input) {
                      @Override
                      public AggregateResult call() {
                        try (
                            // These variables are used to close releasers automatically.
                            @SuppressWarnings("unused") Releaser bufferReleaser = mergeBufferHolder.increment();
                            @SuppressWarnings("unused") Releaser grouperReleaser = grouperHolder.increment()
                        ) {
                          // Return true if OK, false if resources were exhausted.
                          return input.run(queryPlusForRunners, responseContext).accumulate(AggregateResult.ok(), accumulator);
                        } catch (QueryInterruptedException | QueryTimeoutException e) {
                          throw e;
                        } catch (Exception e) {
                          log.error(e, "Exception with one of the sequences!");
                          throw new RuntimeException(e);
                        }
                      }
                    }
                );
                if (isSingleThreaded) {
                  waitForFutureCompletion(query, ImmutableList.of(future), hasTimeout, timeoutAt - System.currentTimeMillis());
                }
                return future;
              }
            }
        ));

        if (!isSingleThreaded) {
          waitForFutureCompletion(query, futures, hasTimeout, timeoutAt - System.currentTimeMillis());
        }

        return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, resources);
      } catch (Throwable t) {
        // Exception caught while setting up the iterator; release resources.
        try {
          resources.close();
        } catch (Exception ex) {
          t.addSuppressed(ex);
        }
        throw t;
      }
    }

    @Override
    public void cleanup(CloseableGrouperIterator<RowBasedKey, ResultRow> iterFromMake) {
      iterFromMake.close();
    }
  });
}
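A detail that is easy to miss in make() is the cleanup discipline: every acquired resource (temporary storage, merge buffers, the grouper) is registered with a Guava Closer as soon as it is created, so a failure at any later point in setup releases everything acquired so far while keeping the original exception primary. A stripped-down sketch of that pattern, with a hypothetical openBuffer() standing in for the real resource acquisitions, could look like:

import com.google.common.io.Closer;
import java.io.Closeable;
import java.io.IOException;

class SetupWithCloser {
  // Placeholder for an acquired resource such as a merge-buffer holder.
  static Closeable openBuffer(int i) {
    return () -> System.out.println("released buffer " + i);
  }

  static Closeable setUp() throws IOException {
    final Closer resources = Closer.create();
    try {
      for (int i = 0; i < 3; i++) {
        resources.register(openBuffer(i)); // register immediately so a later failure still releases it
      }
      // ... build the iterator that takes ownership of `resources` ...
      return resources;
    } catch (Throwable t) {
      // Setup failed: close what was registered and keep the original exception primary.
      try {
        resources.close();
      } catch (Exception ex) {
        t.addSuppressed(ex);
      }
      throw t;
    }
  }
}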
Use of org.apache.druid.query.QueryInterruptedException in project druid by druid-io.
The class SegmentMetadataQueryRunnerFactory, method mergeRunners.
@Override
public QueryRunner<SegmentAnalysis> mergeRunners(QueryProcessingPool queryProcessingPool, Iterable<QueryRunner<SegmentAnalysis>> queryRunners) {
  return new ConcatQueryRunner<SegmentAnalysis>(Sequences.map(
      Sequences.simple(queryRunners),
      new Function<QueryRunner<SegmentAnalysis>, QueryRunner<SegmentAnalysis>>() {
        @Override
        public QueryRunner<SegmentAnalysis> apply(final QueryRunner<SegmentAnalysis> input) {
          return new QueryRunner<SegmentAnalysis>() {
            @Override
            public Sequence<SegmentAnalysis> run(final QueryPlus<SegmentAnalysis> queryPlus, final ResponseContext responseContext) {
              final Query<SegmentAnalysis> query = queryPlus.getQuery();
              final int priority = QueryContexts.getPriority(query);
              final QueryPlus<SegmentAnalysis> threadSafeQueryPlus = queryPlus.withoutThreadUnsafeState();
              ListenableFuture<Sequence<SegmentAnalysis>> future = queryProcessingPool.submitRunnerTask(
                  new AbstractPrioritizedQueryRunnerCallable<Sequence<SegmentAnalysis>, SegmentAnalysis>(priority, input) {
                    @Override
                    public Sequence<SegmentAnalysis> call() {
                      return Sequences.simple(input.run(threadSafeQueryPlus, responseContext).toList());
                    }
                  }
              );
              try {
                queryWatcher.registerQueryFuture(query, future);
                if (QueryContexts.hasTimeout(query)) {
                  return future.get(QueryContexts.getTimeout(query), TimeUnit.MILLISECONDS);
                } else {
                  return future.get();
                }
              } catch (InterruptedException e) {
                log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                future.cancel(true);
                throw new QueryInterruptedException(e);
              } catch (CancellationException e) {
                throw new QueryInterruptedException(e);
              } catch (TimeoutException e) {
                log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                future.cancel(true);
                throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query [%s] timed out", query.getId()));
              } catch (ExecutionException e) {
                throw new RuntimeException(e);
              }
            }
          };
        }
      }
  ));
}
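The exception mapping in the catch blocks is the part that matters for QueryInterruptedException: interruption and cancellation both surface as QueryInterruptedException, a timeout becomes QueryTimeoutException, and the pending work is cancelled with mayInterruptIfRunning=true before rethrowing. A minimal, framework-free sketch of the same mapping, using hypothetical QueryInterrupted / QueryTimedOut runtime exceptions in place of the Druid classes, might be:

import java.util.concurrent.*;

class FutureResults {
  // Hypothetical stand-ins for Druid's QueryInterruptedException / QueryTimeoutException.
  static class QueryInterrupted extends RuntimeException { QueryInterrupted(Throwable cause) { super(cause); } }
  static class QueryTimedOut extends RuntimeException { }

  static <T> T getWithTimeout(Future<T> future, Long timeoutMillis) {
    try {
      return timeoutMillis != null ? future.get(timeoutMillis, TimeUnit.MILLISECONDS) : future.get();
    } catch (InterruptedException e) {
      future.cancel(true);                 // stop the running task
      Thread.currentThread().interrupt();  // common convention; not present in the Druid snippet above
      throw new QueryInterrupted(e);
    } catch (CancellationException e) {
      throw new QueryInterrupted(e);
    } catch (TimeoutException e) {
      future.cancel(true);
      throw new QueryTimedOut();
    } catch (ExecutionException e) {
      throw new RuntimeException(e.getCause());
    }
  }
}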
Use of org.apache.druid.query.QueryInterruptedException in project druid by druid-io.
The class QueryLifecycle, method emitLogsAndMetrics.
/**
 * Emit logs and metrics for this query.
 *
 * @param e             exception that occurred while processing this query
 * @param remoteAddress remote address, for logging; or null if unknown
 * @param bytesWritten  number of bytes written; will become a query/bytes metric if >= 0
 */
@SuppressWarnings("unchecked")
public void emitLogsAndMetrics(@Nullable final Throwable e, @Nullable final String remoteAddress, final long bytesWritten) {
  if (baseQuery == null) {
    // Never initialized, don't log or emit anything.
    return;
  }

  if (state == State.DONE) {
    log.warn("Tried to emit logs and metrics twice for query[%s]!", baseQuery.getId());
  }
  state = State.DONE;

  final boolean success = e == null;
  try {
    final long queryTimeNs = System.nanoTime() - startNs;
    QueryMetrics queryMetrics = DruidMetrics.makeRequestMetrics(
        queryMetricsFactory,
        toolChest,
        baseQuery,
        StringUtils.nullToEmptyNonDruidDataString(remoteAddress)
    );
    queryMetrics.success(success);
    queryMetrics.reportQueryTime(queryTimeNs);
    if (bytesWritten >= 0) {
      queryMetrics.reportQueryBytes(bytesWritten);
    }
    if (authenticationResult != null) {
      queryMetrics.identity(authenticationResult.getIdentity());
    }
    queryMetrics.emit(emitter);

    final Map<String, Object> statsMap = new LinkedHashMap<>();
    statsMap.put("query/time", TimeUnit.NANOSECONDS.toMillis(queryTimeNs));
    statsMap.put("query/bytes", bytesWritten);
    statsMap.put("success", success);
    if (authenticationResult != null) {
      statsMap.put("identity", authenticationResult.getIdentity());
    }
    if (e != null) {
      statsMap.put("exception", e.toString());
      if (QueryContexts.isDebug(baseQuery)) {
        log.warn(e, "Exception while processing queryId [%s]", baseQuery.getId());
      } else {
        log.noStackTrace().warn(e, "Exception while processing queryId [%s]", baseQuery.getId());
      }
      if (e instanceof QueryInterruptedException || e instanceof QueryTimeoutException) {
        // Mimic behavior from QueryResource, where this code was originally taken from.
        statsMap.put("interrupted", true);
        statsMap.put("reason", e.toString());
      }
    }

    requestLogger.logNativeQuery(RequestLogLine.forNative(
        baseQuery,
        DateTimes.utc(startMs),
        StringUtils.nullToEmptyNonDruidDataString(remoteAddress),
        new QueryStats(statsMap)
    ));
  } catch (Exception ex) {
    log.error(ex, "Unable to log query [%s]!", baseQuery);
  }
}
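Only a small part of this method touches QueryInterruptedException: when the query failed with an interruption or timeout, the stats map additionally records interrupted=true and a reason, mirroring QueryResource. A self-contained sketch of just that map-building step, with a boolean flag standing in for the instanceof check on Druid's exception types, might be:

import java.util.LinkedHashMap;
import java.util.Map;

class QueryStatsBuilder {
  // Builds the per-query stats entries the request logger receives; interruptedType stands in for
  // the QueryInterruptedException / QueryTimeoutException instanceof check in the Druid code.
  static Map<String, Object> build(long queryTimeMillis, long bytesWritten, Throwable e, boolean interruptedType) {
    final Map<String, Object> statsMap = new LinkedHashMap<>();
    statsMap.put("query/time", queryTimeMillis);
    statsMap.put("query/bytes", bytesWritten);
    statsMap.put("success", e == null);
    if (e != null) {
      statsMap.put("exception", e.toString());
      if (interruptedType) {
        statsMap.put("interrupted", true);
        statsMap.put("reason", e.toString());
      }
    }
    return statsMap;
  }
}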