
Example 6 with QueryTimeoutException

Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.

The class DirectDruidClient, method run.

@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext context) {
    final Query<T> query = queryPlus.getQuery();
    QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    boolean isBySegment = QueryContexts.isBySegment(query);
    final JavaType queryResultType = isBySegment ? toolChest.getBySegmentResultType() : toolChest.getBaseResultType();
    final ListenableFuture<InputStream> future;
    final String url = scheme + "://" + host + "/druid/v2/";
    final String cancelUrl = url + query.getId();
    try {
        log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
        final long requestStartTimeNs = System.nanoTime();
        final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
        final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
        final AtomicLong totalBytesGathered = context.getTotalBytes();
        final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
        final boolean usingBackpressure = maxQueuedBytes > 0;
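        // Backpressure: when maxQueuedBytes is positive, enqueue() below reports "stop reading" once that many
        // bytes are buffered, and dequeue() resumes the suspended channel via the TrafficCop as the queue drains.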
        final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {

            private final AtomicLong totalByteCount = new AtomicLong(0);

            private final AtomicLong queuedByteCount = new AtomicLong(0);

            private final AtomicLong channelSuspendedTime = new AtomicLong(0);

            private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();

            private final AtomicBoolean done = new AtomicBoolean(false);

            private final AtomicReference<String> fail = new AtomicReference<>();

            private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();

            private QueryMetrics<? super Query<T>> queryMetrics;

            private long responseStartTimeNs;

            private QueryMetrics<? super Query<T>> acquireResponseMetrics() {
                if (queryMetrics == null) {
                    queryMetrics = toolChest.makeMetrics(query);
                    queryMetrics.server(host);
                }
                return queryMetrics;
            }

            /**
             * Queue a buffer. Returns true if we should keep reading, false otherwise.
             */
            private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException {
                // Increment queuedByteCount before queueing the object, so queuedByteCount is at least as high as
                // the actual number of queued bytes at any particular time.
                final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
                final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
                queue.put(holder);
                // True if we should keep reading.
                return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
            }

            private InputStream dequeue() throws InterruptedException {
                final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
                if (holder == null) {
                    throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
                }
                final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
                if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
                    long backPressureTime = Preconditions.checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?").resume(holder.getChunkNum());
                    channelSuspendedTime.addAndGet(backPressureTime);
                }
                return holder.getStream();
            }

            @Override
            public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop) {
                trafficCopRef.set(trafficCop);
                checkQueryTimeout();
                checkTotalBytesLimit(response.getContent().readableBytes());
                log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
                responseStartTimeNs = System.nanoTime();
                acquireResponseMetrics().reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs).emit(emitter);
                final boolean continueReading;
                try {
                    log.trace("Got a response from [%s] for query ID[%s], subquery ID[%s]", url, query.getId(), query.getSubQueryId());
                    final String responseContext = response.headers().get(QueryResource.HEADER_RESPONSE_CONTEXT);
                    context.addRemainingResponse(query.getMostSpecificId(), VAL_TO_REDUCE_REMAINING_RESPONSES);
                    // context may be null in case of error or query timeout
                    if (responseContext != null) {
                        context.merge(ResponseContext.deserialize(responseContext, objectMapper));
                    }
                    continueReading = enqueue(response.getContent(), 0L);
                } catch (final IOException e) {
                    log.error(e, "Error parsing response context from url [%s]", url);
                    return ClientResponse.finished(new InputStream() {

                        @Override
                        public int read() throws IOException {
                            throw e;
                        }
                    });
                } catch (InterruptedException e) {
                    log.error(e, "Queue appending interrupted");
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
                totalByteCount.addAndGet(response.getContent().readableBytes());
                return ClientResponse.finished(new SequenceInputStream(new Enumeration<InputStream>() {

                    @Override
                    public boolean hasMoreElements() {
                        if (fail.get() != null) {
                            throw new RE(fail.get());
                        }
                        checkQueryTimeout();
                        // Then the stream should be spouting good InputStreams.
                        synchronized (done) {
                            return !done.get() || !queue.isEmpty();
                        }
                    }

                    @Override
                    public InputStream nextElement() {
                        if (fail.get() != null) {
                            throw new RE(fail.get());
                        }
                        try {
                            return dequeue();
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            throw new RuntimeException(e);
                        }
                    }
                }), continueReading);
            }

            @Override
            public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk, long chunkNum) {
                checkQueryTimeout();
                final ChannelBuffer channelBuffer = chunk.getContent();
                final int bytes = channelBuffer.readableBytes();
                checkTotalBytesLimit(bytes);
                boolean continueReading = true;
                if (bytes > 0) {
                    try {
                        continueReading = enqueue(channelBuffer, chunkNum);
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    }
                    totalByteCount.addAndGet(bytes);
                }
                return ClientResponse.finished(clientResponse.getObj(), continueReading);
            }

            @Override
            public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
                long stopTimeNs = System.nanoTime();
                long nodeTimeNs = stopTimeNs - requestStartTimeNs;
                final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
                log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, totalByteCount.get(), nodeTimeMs, // Floating math; division by zero will yield Inf, not exception
                totalByteCount.get() / (0.001 * nodeTimeMs));
                QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
                responseMetrics.reportNodeTime(nodeTimeNs);
                responseMetrics.reportNodeBytes(totalByteCount.get());
                if (usingBackpressure) {
                    responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
                }
                responseMetrics.emit(emitter);
                synchronized (done) {
                    try {
                        // An empty buffer is enqueued at the end to give SequenceInputStream.close() something to
                        // close out after done is set to true, regardless of the rest of the stream's state.
                        queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER, Long.MAX_VALUE));
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    } finally {
                        done.set(true);
                    }
                }
                return ClientResponse.finished(clientResponse.getObj());
            }

            @Override
            public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
                String msg = StringUtils.format("Query[%s] url[%s] failed with exception msg [%s]", query.getId(), url, e.getMessage());
                setupResponseReadFailure(msg, e);
            }

            private void setupResponseReadFailure(String msg, Throwable th) {
                fail.set(msg);
                queue.clear();
                queue.offer(InputStreamHolder.fromStream(new InputStream() {

                    @Override
                    public int read() throws IOException {
                        if (th != null) {
                            throw new IOException(msg, th);
                        } else {
                            throw new IOException(msg);
                        }
                    }
                }, -1, 0));
            }

            // Returns remaining timeout or throws exception if timeout already elapsed.
            private long checkQueryTimeout() {
                long timeLeft = timeoutAt - System.currentTimeMillis();
                if (timeLeft <= 0) {
                    String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
                    setupResponseReadFailure(msg, null);
                    throw new QueryTimeoutException(msg);
                } else {
                    return timeLeft;
                }
            }

            private void checkTotalBytesLimit(long bytes) {
                if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
                    String msg = StringUtils.format("Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url);
                    setupResponseReadFailure(msg, null);
                    throw new ResourceLimitExceededException(msg);
                }
            }
        };
        long timeLeft = timeoutAt - System.currentTimeMillis();
        if (timeLeft <= 0) {
            throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
        }
        future = httpClient.go(
            new Request(HttpMethod.POST, new URL(url))
                .setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
                .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
            responseHandler,
            Duration.millis(timeLeft)
        );
        queryWatcher.registerQueryFuture(query, future);
        openConnections.getAndIncrement();
        Futures.addCallback(future, new FutureCallback<InputStream>() {

            @Override
            public void onSuccess(InputStream result) {
                openConnections.getAndDecrement();
            }

            @Override
            public void onFailure(Throwable t) {
                openConnections.getAndDecrement();
                if (future.isCancelled()) {
                    cancelQuery(query, cancelUrl);
                }
            }
        }, // The callback is non-blocking and quick, so it's OK to schedule it using directExecutor()
        Execs.directExecutor());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {

        @Override
        public JsonParserIterator<T> make() {
            return new JsonParserIterator<T>(queryResultType, future, url, query, host, toolChest.decorateObjectMapper(objectMapper, query));
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake) {
            CloseableUtils.closeAndWrapExceptions(iterFromMake);
        }
    });
    // avoid the cost of de-serializing and then re-serializing again when adding to cache
    if (!isBySegment) {
        retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
    }
    return retVal;
}
Also used: ClientResponse(org.apache.druid.java.util.http.client.response.ClientResponse) Query(org.apache.druid.query.Query) URL(java.net.URL) ChannelBuffer(org.jboss.netty.buffer.ChannelBuffer) QueryTimeoutException(org.apache.druid.query.QueryTimeoutException) BlockingQueue(java.util.concurrent.BlockingQueue) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Enumeration(java.util.Enumeration) SequenceInputStream(java.io.SequenceInputStream) InputStream(java.io.InputStream) Request(org.apache.druid.java.util.http.client.Request) HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) BaseSequence(org.apache.druid.java.util.common.guava.BaseSequence) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) JavaType(com.fasterxml.jackson.databind.JavaType) AtomicLong(java.util.concurrent.atomic.AtomicLong) RE(org.apache.druid.java.util.common.RE) ResourceLimitExceededException(org.apache.druid.query.ResourceLimitExceededException) QueryMetrics(org.apache.druid.query.QueryMetrics) HttpResponseHandler(org.apache.druid.java.util.http.client.response.HttpResponseHandler) HttpChunk(org.jboss.netty.handler.codec.http.HttpChunk)
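
A note on the pattern: DirectDruidClient carries an absolute fail time in the query context, bounds every blocking wait by the time remaining, and turns an exhausted budget into a QueryTimeoutException (see dequeue() and checkQueryTimeout() above). A minimal standalone sketch of that deadline-bounded queue; DeadlineQueue and its messages are illustrative, not part of Druid:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.druid.query.QueryTimeoutException;

// Illustrative sketch only: a queue whose consumers wait against an absolute deadline,
// mirroring the dequeue()/checkQueryTimeout() pair above.
public class DeadlineQueue<T> {

    private final BlockingQueue<T> queue = new LinkedBlockingQueue<>();

    // Absolute wall-clock deadline, analogous to QUERY_FAIL_TIME in the query context.
    private final long deadlineMillis;

    public DeadlineQueue(long deadlineMillis) {
        this.deadlineMillis = deadlineMillis;
    }

    // Returns the remaining budget, or throws if it has already elapsed (cf. checkQueryTimeout()).
    private long timeLeftMillis() {
        final long timeLeft = deadlineMillis - System.currentTimeMillis();
        if (timeLeft <= 0) {
            throw new QueryTimeoutException("Deadline elapsed before a result arrived");
        }
        return timeLeft;
    }

    public T take() throws InterruptedException {
        final T item = queue.poll(timeLeftMillis(), TimeUnit.MILLISECONDS);
        if (item == null) {
            // A null from poll() means the bounded wait expired: surface it as a query timeout.
            throw new QueryTimeoutException("Timed out waiting for a result");
        }
        return item;
    }

    public void put(T item) throws InterruptedException {
        queue.put(item);
    }
}

Because the deadline is absolute rather than per-call, repeated take() calls share one budget, which is exactly what lets a single query timeout cover an arbitrary number of response chunks.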

Example 7 with QueryTimeoutException

Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.

The class ConcurrentGrouper, method parallelSortAndGetGroupersIterator.

private List<CloseableIterator<Entry<KeyType>>> parallelSortAndGetGroupersIterator() {
    // The number of groupers matches the number of processing threads in the executor
    final List<ListenableFuture<CloseableIterator<Entry<KeyType>>>> futures = groupers.stream().map(grouper -> executor.submit(new AbstractPrioritizedCallable<CloseableIterator<Entry<KeyType>>>(priority) {

        @Override
        public CloseableIterator<Entry<KeyType>> call() {
            return grouper.iterator(true);
        }
    })).collect(Collectors.toList());
    ListenableFuture<List<CloseableIterator<Entry<KeyType>>>> future = Futures.allAsList(futures);
    try {
        final long timeout = queryTimeoutAt - System.currentTimeMillis();
        return hasQueryTimeout ? future.get(timeout, TimeUnit.MILLISECONDS) : future.get();
    } catch (InterruptedException | CancellationException e) {
        GuavaUtils.cancelAll(true, future, futures);
        throw new QueryInterruptedException(e);
    } catch (TimeoutException e) {
        GuavaUtils.cancelAll(true, future, futures);
        throw new QueryTimeoutException();
    } catch (ExecutionException e) {
        GuavaUtils.cancelAll(true, future, futures);
        throw new RuntimeException(e.getCause());
    }
}
Also used: Arrays(java.util.Arrays) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Supplier(com.google.common.base.Supplier) TimeoutException(java.util.concurrent.TimeoutException) ByteBuffer(java.nio.ByteBuffer) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) AbstractPrioritizedCallable(org.apache.druid.query.AbstractPrioritizedCallable) ColumnSelectorFactory(org.apache.druid.segment.ColumnSelectorFactory) ImmutableList(com.google.common.collect.ImmutableList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Suppliers(com.google.common.base.Suppliers) CloseableIterator(org.apache.druid.java.util.common.parsers.CloseableIterator) Nullable(javax.annotation.Nullable) CancellationException(java.util.concurrent.CancellationException) QueryInterruptedException(org.apache.druid.query.QueryInterruptedException) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) Futures(com.google.common.util.concurrent.Futures) List(java.util.List) CloseableIterators(org.apache.druid.java.util.common.CloseableIterators) QueryTimeoutException(org.apache.druid.query.QueryTimeoutException) GuavaUtils(org.apache.druid.common.guava.GuavaUtils) Preconditions(com.google.common.base.Preconditions) Comparator(java.util.Comparator) ReferenceCountingResourceHolder(org.apache.druid.collections.ReferenceCountingResourceHolder) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService)
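
ConcurrentGrouper shows the future-based variant of the same idea: convert the absolute queryTimeoutAt into a relative wait for future.get, then translate each failure mode into the matching Druid exception. A compact helper capturing just that translation; FutureWaits and its method name are hypothetical, not a Druid API:

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.druid.query.QueryInterruptedException;
import org.apache.druid.query.QueryTimeoutException;

// Illustrative sketch only, assuming the same exception mapping as parallelSortAndGetGroupersIterator().
public final class FutureWaits {

    private FutureWaits() {
    }

    public static <T> T getWithDeadline(Future<T> future, boolean hasTimeout, long deadlineMillis) {
        try {
            if (hasTimeout) {
                // A non-positive remaining budget makes get() time out immediately, which is the desired behavior.
                return future.get(deadlineMillis - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
            }
            return future.get();
        } catch (InterruptedException | CancellationException e) {
            // Interruption and external cancellation are both reported as an interrupted query.
            future.cancel(true);
            throw new QueryInterruptedException(e);
        } catch (TimeoutException e) {
            future.cancel(true);
            throw new QueryTimeoutException();
        } catch (ExecutionException e) {
            future.cancel(true);
            throw new RuntimeException(e.getCause());
        }
    }
}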

Example 8 with QueryTimeoutException

Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.

The class GroupByMergingQueryRunnerV2, method run.

@Override
public Sequence<ResultRow> run(final QueryPlus<ResultRow> queryPlus, final ResponseContext responseContext) {
    final GroupByQuery query = (GroupByQuery) queryPlus.getQuery();
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    // CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION is here because realtime servers use nested mergeRunners calls
    // (one for the entire query and one for each sink). We only want the outer call to actually do merging with a
    // merge buffer, otherwise the query will allocate too many merge buffers. This is potentially sub-optimal as it
    // will involve materializing the results for each sink before starting to feed them into the outer merge buffer.
    // I'm not sure of a better way to do this without tweaking how realtime servers do queries.
    final boolean forceChainedExecution = query.getContextBoolean(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, false);
    final QueryPlus<ResultRow> queryPlusForRunners = queryPlus.withQuery(query.withOverriddenContext(ImmutableMap.of(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, true))).withoutThreadUnsafeState();
    if (QueryContexts.isBySegment(query) || forceChainedExecution) {
        ChainedExecutionQueryRunner<ResultRow> runner = new ChainedExecutionQueryRunner<>(queryProcessingPool, queryWatcher, queryables);
        return runner.run(queryPlusForRunners, responseContext);
    }
    final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();
    final File temporaryStorageDirectory = new File(processingTmpDir, StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
    final int priority = QueryContexts.getPriority(query);
    // Figure out timeoutAt time now, so we can apply the timeout to both the mergeBufferPool.take and the actual
    // query processing together.
    final long queryTimeout = QueryContexts.getTimeout(query);
    final boolean hasTimeout = QueryContexts.hasTimeout(query);
    final long timeoutAt = System.currentTimeMillis() + queryTimeout;
    return new BaseSequence<>(new BaseSequence.IteratorMaker<ResultRow, CloseableGrouperIterator<RowBasedKey, ResultRow>>() {

        @Override
        public CloseableGrouperIterator<RowBasedKey, ResultRow> make() {
            final Closer resources = Closer.create();
            try {
                final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
                final ReferenceCountingResourceHolder<LimitedTemporaryStorage> temporaryStorageHolder = ReferenceCountingResourceHolder.fromCloseable(temporaryStorage);
                resources.register(temporaryStorageHolder);
                // If parallelCombine is enabled, we need two merge buffers for parallel aggregating and parallel combining
                final int numMergeBuffers = querySpecificConfig.getNumParallelCombineThreads() > 1 ? 2 : 1;
                final List<ReferenceCountingResourceHolder<ByteBuffer>> mergeBufferHolders = getMergeBuffersHolder(numMergeBuffers, hasTimeout, timeoutAt);
                resources.registerAll(mergeBufferHolders);
                final ReferenceCountingResourceHolder<ByteBuffer> mergeBufferHolder = mergeBufferHolders.get(0);
                final ReferenceCountingResourceHolder<ByteBuffer> combineBufferHolder = numMergeBuffers == 2 ? mergeBufferHolders.get(1) : null;
                // queryProcessingPool is passed as the executor service.
                Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(query, null, config, Suppliers.ofInstance(mergeBufferHolder.get()), combineBufferHolder, concurrencyHint, temporaryStorage, spillMapper, queryProcessingPool, priority, hasTimeout, timeoutAt, mergeBufferSize);
                final Grouper<RowBasedKey> grouper = pair.lhs;
                final Accumulator<AggregateResult, ResultRow> accumulator = pair.rhs;
                grouper.init();
                final ReferenceCountingResourceHolder<Grouper<RowBasedKey>> grouperHolder = ReferenceCountingResourceHolder.fromCloseable(grouper);
                resources.register(grouperHolder);
                List<ListenableFuture<AggregateResult>> futures = Lists.newArrayList(Iterables.transform(queryables, new Function<QueryRunner<ResultRow>, ListenableFuture<AggregateResult>>() {

                    @Override
                    public ListenableFuture<AggregateResult> apply(final QueryRunner<ResultRow> input) {
                        if (input == null) {
                            throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                        }
                        ListenableFuture<AggregateResult> future = queryProcessingPool.submitRunnerTask(new AbstractPrioritizedQueryRunnerCallable<AggregateResult, ResultRow>(priority, input) {

                            @Override
                            public AggregateResult call() {
                                // The releasers are acquired in try-with-resources so they close automatically.
                                try (@SuppressWarnings("unused") Releaser bufferReleaser = mergeBufferHolder.increment();
                                     @SuppressWarnings("unused") Releaser grouperReleaser = grouperHolder.increment()) {
                                    // Return true if OK, false if resources were exhausted.
                                    return input.run(queryPlusForRunners, responseContext).accumulate(AggregateResult.ok(), accumulator);
                                } catch (QueryInterruptedException | QueryTimeoutException e) {
                                    throw e;
                                } catch (Exception e) {
                                    log.error(e, "Exception with one of the sequences!");
                                    throw new RuntimeException(e);
                                }
                            }
                        });
                        if (isSingleThreaded) {
                            waitForFutureCompletion(query, ImmutableList.of(future), hasTimeout, timeoutAt - System.currentTimeMillis());
                        }
                        return future;
                    }
                }));
                if (!isSingleThreaded) {
                    waitForFutureCompletion(query, futures, hasTimeout, timeoutAt - System.currentTimeMillis());
                }
                return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, resources);
            } catch (Throwable t) {
                // Exception caught while setting up the iterator; release resources.
                try {
                    resources.close();
                } catch (Exception ex) {
                    t.addSuppressed(ex);
                }
                throw t;
            }
        }

        @Override
        public void cleanup(CloseableGrouperIterator<RowBasedKey, ResultRow> iterFromMake) {
            iterFromMake.close();
        }
    });
}
Also used: Accumulator(org.apache.druid.java.util.common.guava.Accumulator) AbstractPrioritizedQueryRunnerCallable(org.apache.druid.query.AbstractPrioritizedQueryRunnerCallable) ChainedExecutionQueryRunner(org.apache.druid.query.ChainedExecutionQueryRunner) QueryTimeoutException(org.apache.druid.query.QueryTimeoutException) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Releaser(org.apache.druid.collections.Releaser) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) ISE(org.apache.druid.java.util.common.ISE) Pair(org.apache.druid.java.util.common.Pair) QueryInterruptedException(org.apache.druid.query.QueryInterruptedException) ResultRow(org.apache.druid.query.groupby.ResultRow) Closer(org.apache.druid.java.util.common.io.Closer) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) RowBasedKey(org.apache.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.RowBasedKey) ByteBuffer(java.nio.ByteBuffer) BaseSequence(org.apache.druid.java.util.common.guava.BaseSequence) QueryRunner(org.apache.druid.query.QueryRunner) TimeoutException(java.util.concurrent.TimeoutException) CancellationException(java.util.concurrent.CancellationException) ExecutionException(java.util.concurrent.ExecutionException) ResourceLimitExceededException(org.apache.druid.query.ResourceLimitExceededException) ReferenceCountingResourceHolder(org.apache.druid.collections.ReferenceCountingResourceHolder) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) File(java.io.File)
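
The structural point in run() is that timeoutAt is computed once, up front, and every later blocking step, whether taking merge buffers or waiting on the per-runner futures, recomputes timeoutAt - System.currentTimeMillis() so the whole query draws down a single budget. A small sketch of that discipline, assuming a BlockingQueue-style resource pool; SharedDeadline is an illustrative name, not Druid code:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.druid.query.QueryTimeoutException;

// Illustrative sketch only: one absolute deadline shared by every blocking step of a query.
public class SharedDeadline {

    private final long timeoutAtMillis;

    public SharedDeadline(long timeoutMillis) {
        // Fix the deadline once; later steps inherit whatever budget remains.
        this.timeoutAtMillis = System.currentTimeMillis() + timeoutMillis;
    }

    public long remainingMillis() {
        return timeoutAtMillis - System.currentTimeMillis();
    }

    // Acquire a pooled resource within the remaining budget, e.g. a merge buffer.
    public <T> T acquire(BlockingQueue<T> pool) throws InterruptedException {
        final long timeLeft = remainingMillis();
        final T resource = timeLeft > 0 ? pool.poll(timeLeft, TimeUnit.MILLISECONDS) : null;
        if (resource == null) {
            throw new QueryTimeoutException("Timed out acquiring a pooled resource");
        }
        return resource;
    }
}

Each acquire() consumes part of the same budget, so a slow buffer acquisition shortens the wait allowed for the query processing that follows.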

Example 9 with QueryTimeoutException

Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.

The class ScanQueryEngine, method process.

public Sequence<ScanResultValue> process(final ScanQuery query, final Segment segment, final ResponseContext responseContext) {
    // "legacy" should be non-null due to toolChest.mergeResults
    final boolean legacy = Preconditions.checkNotNull(query.isLegacy(), "Expected non-null 'legacy' parameter");
    final Long numScannedRows = responseContext.getRowScanCount();
    if (numScannedRows != null && numScannedRows >= query.getScanRowsLimit() && query.getTimeOrder().equals(ScanQuery.Order.NONE)) {
        return Sequences.empty();
    }
    final boolean hasTimeout = QueryContexts.hasTimeout(query);
    final Long timeoutAt = responseContext.getTimeoutTime();
    final long start = System.currentTimeMillis();
    final StorageAdapter adapter = segment.asStorageAdapter();
    if (adapter == null) {
        throw new ISE("Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped.");
    }
    final List<String> allColumns = new ArrayList<>();
    if (query.getColumns() != null && !query.getColumns().isEmpty()) {
        if (legacy && !query.getColumns().contains(LEGACY_TIMESTAMP_KEY)) {
            allColumns.add(LEGACY_TIMESTAMP_KEY);
        }
        // Unless we're in legacy mode, allColumns equals query.getColumns() exactly. This is nice since it makes
        // the compactedList form easier to use.
        allColumns.addAll(query.getColumns());
    } else {
        final Set<String> availableColumns = Sets.newLinkedHashSet(
            Iterables.concat(
                Collections.singleton(legacy ? LEGACY_TIMESTAMP_KEY : ColumnHolder.TIME_COLUMN_NAME),
                Iterables.transform(Arrays.asList(query.getVirtualColumns().getVirtualColumns()), VirtualColumn::getOutputName),
                adapter.getAvailableDimensions(),
                adapter.getAvailableMetrics()
            )
        );
        allColumns.addAll(availableColumns);
        if (legacy) {
            allColumns.remove(ColumnHolder.TIME_COLUMN_NAME);
        }
    }
    final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals();
    Preconditions.checkArgument(intervals.size() == 1, "Can only handle a single interval, got[%s]", intervals);
    final SegmentId segmentId = segment.getId();
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getFilter()));
    // If the row count is not set, set it to 0, else do nothing.
    responseContext.addRowScanCount(0);
    final long limit = calculateRemainingScanRowsLimit(query, responseContext);
    return Sequences.concat(adapter.makeCursors(
        filter,
        intervals.get(0),
        query.getVirtualColumns(),
        Granularities.ALL,
        query.getTimeOrder().equals(ScanQuery.Order.DESCENDING) || (query.getTimeOrder().equals(ScanQuery.Order.NONE) && query.isDescending()),
        null
    ).map(cursor -> new BaseSequence<>(new BaseSequence.IteratorMaker<ScanResultValue, Iterator<ScanResultValue>>() {

        @Override
        public Iterator<ScanResultValue> make() {
            final List<BaseObjectColumnValueSelector> columnSelectors = new ArrayList<>(allColumns.size());
            for (String column : allColumns) {
                final BaseObjectColumnValueSelector selector;
                if (legacy && LEGACY_TIMESTAMP_KEY.equals(column)) {
                    selector = cursor.getColumnSelectorFactory().makeColumnValueSelector(ColumnHolder.TIME_COLUMN_NAME);
                } else {
                    selector = cursor.getColumnSelectorFactory().makeColumnValueSelector(column);
                }
                columnSelectors.add(selector);
            }
            final int batchSize = query.getBatchSize();
            return new Iterator<ScanResultValue>() {

                private long offset = 0;

                @Override
                public boolean hasNext() {
                    return !cursor.isDone() && offset < limit;
                }

                @Override
                public ScanResultValue next() {
                    if (!hasNext()) {
                        throw new NoSuchElementException();
                    }
                    if (hasTimeout && System.currentTimeMillis() >= timeoutAt) {
                        throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query [%s] timed out", query.getId()));
                    }
                    final long lastOffset = offset;
                    final Object events;
                    final ScanQuery.ResultFormat resultFormat = query.getResultFormat();
                    if (ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST.equals(resultFormat)) {
                        events = rowsToCompactedList();
                    } else if (ScanQuery.ResultFormat.RESULT_FORMAT_LIST.equals(resultFormat)) {
                        events = rowsToList();
                    } else {
                        throw new UOE("resultFormat[%s] is not supported", resultFormat.toString());
                    }
                    responseContext.addRowScanCount(offset - lastOffset);
                    if (hasTimeout) {
                        responseContext.putTimeoutTime(timeoutAt - (System.currentTimeMillis() - start));
                    }
                    return new ScanResultValue(segmentId.toString(), allColumns, events);
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }

                private List<List<Object>> rowsToCompactedList() {
                    final List<List<Object>> events = new ArrayList<>(batchSize);
                    final long iterLimit = Math.min(limit, offset + batchSize);
                    for (; !cursor.isDone() && offset < iterLimit; cursor.advance(), offset++) {
                        final List<Object> theEvent = new ArrayList<>(allColumns.size());
                        for (int j = 0; j < allColumns.size(); j++) {
                            theEvent.add(getColumnValue(j));
                        }
                        events.add(theEvent);
                    }
                    return events;
                }

                private List<Map<String, Object>> rowsToList() {
                    List<Map<String, Object>> events = Lists.newArrayListWithCapacity(batchSize);
                    final long iterLimit = Math.min(limit, offset + batchSize);
                    for (; !cursor.isDone() && offset < iterLimit; cursor.advance(), offset++) {
                        final Map<String, Object> theEvent = new LinkedHashMap<>();
                        for (int j = 0; j < allColumns.size(); j++) {
                            theEvent.put(allColumns.get(j), getColumnValue(j));
                        }
                        events.add(theEvent);
                    }
                    return events;
                }

                private Object getColumnValue(int i) {
                    final BaseObjectColumnValueSelector selector = columnSelectors.get(i);
                    final Object value;
                    if (legacy && allColumns.get(i).equals(LEGACY_TIMESTAMP_KEY)) {
                        value = DateTimes.utc((long) selector.getObject());
                    } else {
                        value = selector == null ? null : selector.getObject();
                    }
                    return value;
                }
            };
        }

        @Override
        public void cleanup(Iterator<ScanResultValue> iterFromMake) {
        }
    })));
}
Also used: Iterables(com.google.common.collect.Iterables) Arrays(java.util.Arrays) StorageAdapter(org.apache.druid.segment.StorageAdapter) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) ColumnHolder(org.apache.druid.segment.column.ColumnHolder) Map(java.util.Map) UOE(org.apache.druid.java.util.common.UOE) NoSuchElementException(java.util.NoSuchElementException) BaseObjectColumnValueSelector(org.apache.druid.segment.BaseObjectColumnValueSelector) Sequences(org.apache.druid.java.util.common.guava.Sequences) Segment(org.apache.druid.segment.Segment) DateTimes(org.apache.druid.java.util.common.DateTimes) Sequence(org.apache.druid.java.util.common.guava.Sequence) Iterator(java.util.Iterator) ResponseContext(org.apache.druid.query.context.ResponseContext) VirtualColumn(org.apache.druid.segment.VirtualColumn) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) Sets(com.google.common.collect.Sets) QueryContexts(org.apache.druid.query.QueryContexts) Granularities(org.apache.druid.java.util.common.granularity.Granularities) List(java.util.List) QueryTimeoutException(org.apache.druid.query.QueryTimeoutException) Preconditions(com.google.common.base.Preconditions) BaseSequence(org.apache.druid.java.util.common.guava.BaseSequence) SegmentId(org.apache.druid.timeline.SegmentId) Filters(org.apache.druid.segment.filter.Filters) Collections(java.util.Collections) Filter(org.apache.druid.query.filter.Filter)
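
ScanQueryEngine checks the deadline inside the iterator itself, once per returned batch, so a long scan cannot overrun its budget between results. A stripped-down wrapper showing that per-element check; DeadlineCheckingIterator is a hypothetical name, not a Druid class:

import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.druid.query.QueryTimeoutException;

// Illustrative sketch only: the per-element deadline check that ScanQueryEngine performs in next().
public class DeadlineCheckingIterator<T> implements Iterator<T> {

    private final Iterator<T> delegate;
    private final boolean hasTimeout;
    private final long timeoutAtMillis;

    public DeadlineCheckingIterator(Iterator<T> delegate, boolean hasTimeout, long timeoutAtMillis) {
        this.delegate = delegate;
        this.hasTimeout = hasTimeout;
        this.timeoutAtMillis = timeoutAtMillis;
    }

    @Override
    public boolean hasNext() {
        return delegate.hasNext();
    }

    @Override
    public T next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        // Checking on every element keeps a slow scan from overrunning its budget between results.
        if (hasTimeout && System.currentTimeMillis() >= timeoutAtMillis) {
            throw new QueryTimeoutException("Scan exceeded its time budget");
        }
        return delegate.next();
    }
}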

Example 10 with QueryTimeoutException

Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.

The class SegmentMetadataQueryRunnerFactory, method mergeRunners.

@Override
public QueryRunner<SegmentAnalysis> mergeRunners(QueryProcessingPool queryProcessingPool, Iterable<QueryRunner<SegmentAnalysis>> queryRunners) {
    return new ConcatQueryRunner<SegmentAnalysis>(Sequences.map(Sequences.simple(queryRunners), new Function<QueryRunner<SegmentAnalysis>, QueryRunner<SegmentAnalysis>>() {

        @Override
        public QueryRunner<SegmentAnalysis> apply(final QueryRunner<SegmentAnalysis> input) {
            return new QueryRunner<SegmentAnalysis>() {

                @Override
                public Sequence<SegmentAnalysis> run(final QueryPlus<SegmentAnalysis> queryPlus, final ResponseContext responseContext) {
                    final Query<SegmentAnalysis> query = queryPlus.getQuery();
                    final int priority = QueryContexts.getPriority(query);
                    final QueryPlus<SegmentAnalysis> threadSafeQueryPlus = queryPlus.withoutThreadUnsafeState();
                    ListenableFuture<Sequence<SegmentAnalysis>> future = queryProcessingPool.submitRunnerTask(new AbstractPrioritizedQueryRunnerCallable<Sequence<SegmentAnalysis>, SegmentAnalysis>(priority, input) {

                        @Override
                        public Sequence<SegmentAnalysis> call() {
                            return Sequences.simple(input.run(threadSafeQueryPlus, responseContext).toList());
                        }
                    });
                    try {
                        queryWatcher.registerQueryFuture(query, future);
                        if (QueryContexts.hasTimeout(query)) {
                            return future.get(QueryContexts.getTimeout(query), TimeUnit.MILLISECONDS);
                        } else {
                            return future.get();
                        }
                    } catch (InterruptedException e) {
                        log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                        future.cancel(true);
                        throw new QueryInterruptedException(e);
                    } catch (CancellationException e) {
                        throw new QueryInterruptedException(e);
                    } catch (TimeoutException e) {
                        log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                        future.cancel(true);
                        throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query [%s] timed out", query.getId()));
                    } catch (ExecutionException e) {
                        throw new RuntimeException(e);
                    }
                }
            };
        }
    }));
}
Also used: Sequence(org.apache.druid.java.util.common.guava.Sequence) QueryInterruptedException(org.apache.druid.query.QueryInterruptedException) ConcatQueryRunner(org.apache.druid.query.ConcatQueryRunner) QueryRunner(org.apache.druid.query.QueryRunner) Function(com.google.common.base.Function) QueryTimeoutException(org.apache.druid.query.QueryTimeoutException) CancellationException(java.util.concurrent.CancellationException) ResponseContext(org.apache.druid.query.context.ResponseContext) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) ExecutionException(java.util.concurrent.ExecutionException) QueryPlus(org.apache.druid.query.QueryPlus) TimeoutException(java.util.concurrent.TimeoutException)
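
The catch ladder above is the canonical translation: InterruptedException and CancellationException become QueryInterruptedException, TimeoutException becomes QueryTimeoutException (after cancelling the future), and ExecutionException is rethrown as a RuntimeException. Factored out, assuming a plain java.util.concurrent.Future; QueryFutures is an illustrative name, not a Druid class:

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.druid.query.QueryInterruptedException;
import org.apache.druid.query.QueryTimeoutException;

// Illustrative sketch only: the full catch ladder from mergeRunners(), factored into one method.
public final class QueryFutures {

    private QueryFutures() {
    }

    public static <T> T awaitOrCancel(Future<T> future, boolean hasTimeout, long timeoutMillis, String queryId) {
        try {
            return hasTimeout ? future.get(timeoutMillis, TimeUnit.MILLISECONDS) : future.get();
        } catch (InterruptedException e) {
            // The waiting thread was interrupted: cancel the pending work and report an interruption.
            future.cancel(true);
            throw new QueryInterruptedException(e);
        } catch (CancellationException e) {
            // The future was cancelled elsewhere, e.g. by a query watcher.
            throw new QueryInterruptedException(e);
        } catch (TimeoutException e) {
            // The budget ran out: cancel pending work and signal a query timeout.
            future.cancel(true);
            throw new QueryTimeoutException("Query [" + queryId + "] timed out");
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
}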

Aggregations

QueryTimeoutException (org.apache.druid.query.QueryTimeoutException): 15
QueryInterruptedException (org.apache.druid.query.QueryInterruptedException): 7
IOException (java.io.IOException): 5
Test (org.junit.Test): 5
CancellationException (java.util.concurrent.CancellationException): 4
ExecutionException (java.util.concurrent.ExecutionException): 4
TimeoutException (java.util.concurrent.TimeoutException): 4
Sequence (org.apache.druid.java.util.common.guava.Sequence): 4
ResourceLimitExceededException (org.apache.druid.query.ResourceLimitExceededException): 4
ResponseContext (org.apache.druid.query.context.ResponseContext): 4
List (java.util.List): 3
Response (javax.ws.rs.core.Response): 3
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 2
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2
Preconditions (com.google.common.base.Preconditions): 2
ImmutableList (com.google.common.collect.ImmutableList): 2
CountingOutputStream (com.google.common.io.CountingOutputStream): 2
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2
ByteBuffer (java.nio.ByteBuffer): 2
LinkedHashMap (java.util.LinkedHashMap): 2