Search in sources:

Example 1 with ResourceLimitExceededException

Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io.

From class SqlResource, method doPost:

@POST
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public Response doPost(final SqlQuery sqlQuery, @Context final HttpServletRequest req) throws IOException {
    final SqlLifecycle lifecycle = sqlLifecycleFactory.factorize();
    final String sqlQueryId = lifecycle.initialize(sqlQuery.getQuery(), sqlQuery.getContext());
    final String remoteAddr = req.getRemoteAddr();
    final String currThreadName = Thread.currentThread().getName();
    try {
        Thread.currentThread().setName(StringUtils.format("sql[%s]", sqlQueryId));
        lifecycle.setParameters(sqlQuery.getParameterList());
        lifecycle.validateAndAuthorize(req);
        // must be added after the lifecycle is authorized
        sqlLifecycleManager.add(sqlQueryId, lifecycle);
        lifecycle.plan();
        final SqlRowTransformer rowTransformer = lifecycle.createRowTransformer();
        final Sequence<Object[]> sequence = lifecycle.execute();
        final Yielder<Object[]> yielder0 = Yielders.each(sequence);
        try {
            final Response.ResponseBuilder responseBuilder = Response.ok((StreamingOutput) outputStream -> {
                Exception e = null;
                CountingOutputStream os = new CountingOutputStream(outputStream);
                Yielder<Object[]> yielder = yielder0;
                try (final ResultFormat.Writer writer = sqlQuery.getResultFormat().createFormatter(os, jsonMapper)) {
                    writer.writeResponseStart();
                    if (sqlQuery.includeHeader()) {
                        writer.writeHeader(rowTransformer.getRowType(), sqlQuery.includeTypesHeader(), sqlQuery.includeSqlTypesHeader());
                    }
                    while (!yielder.isDone()) {
                        final Object[] row = yielder.get();
                        writer.writeRowStart();
                        for (int i = 0; i < rowTransformer.getFieldList().size(); i++) {
                            final Object value = rowTransformer.transform(row, i);
                            writer.writeRowField(rowTransformer.getFieldList().get(i), value);
                        }
                        writer.writeRowEnd();
                        yielder = yielder.next(null);
                    }
                    writer.writeResponseEnd();
                } catch (Exception ex) {
                    e = ex;
                    log.error(ex, "Unable to send SQL response [%s]", sqlQueryId);
                    throw new RuntimeException(ex);
                } finally {
                    yielder.close();
                    endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, os.getCount());
                }
            }).header(SQL_QUERY_ID_RESPONSE_HEADER, sqlQueryId);
            if (sqlQuery.includeHeader()) {
                responseBuilder.header(SQL_HEADER_RESPONSE_HEADER, SQL_HEADER_VALUE);
            }
            return responseBuilder.build();
        } catch (Throwable e) {
            // make sure to close yielder if anything happened before starting to serialize the response.
            yielder0.close();
            throw new RuntimeException(e);
        }
    } catch (QueryCapacityExceededException cap) {
        endLifecycle(sqlQueryId, lifecycle, cap, remoteAddr, -1);
        return buildNonOkResponse(QueryCapacityExceededException.STATUS_CODE, cap, sqlQueryId);
    } catch (QueryUnsupportedException unsupported) {
        endLifecycle(sqlQueryId, lifecycle, unsupported, remoteAddr, -1);
        return buildNonOkResponse(QueryUnsupportedException.STATUS_CODE, unsupported, sqlQueryId);
    } catch (QueryTimeoutException timeout) {
        endLifecycle(sqlQueryId, lifecycle, timeout, remoteAddr, -1);
        return buildNonOkResponse(QueryTimeoutException.STATUS_CODE, timeout, sqlQueryId);
    } catch (SqlPlanningException | ResourceLimitExceededException e) {
        endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
        return buildNonOkResponse(BadQueryException.STATUS_CODE, e, sqlQueryId);
    } catch (ForbiddenException e) {
        endLifecycleWithoutEmittingMetrics(sqlQueryId, lifecycle);
        // let ForbiddenExceptionMapper handle this
        throw (ForbiddenException) serverConfig.getErrorResponseTransformStrategy().transformIfNeeded(e);
    } catch (RelOptPlanner.CannotPlanException e) {
        endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
        SqlPlanningException spe = new SqlPlanningException(SqlPlanningException.PlanningError.UNSUPPORTED_SQL_ERROR, e.getMessage());
        return buildNonOkResponse(BadQueryException.STATUS_CODE, spe, sqlQueryId);
    }
    // Calcite can throw a java.lang.AssertionError, which is an Error rather than an Exception;
    // catching Throwable covers both.
    catch (Throwable e) {
        log.warn(e, "Failed to handle query: %s", sqlQuery);
        endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
        return buildNonOkResponse(Status.INTERNAL_SERVER_ERROR.getStatusCode(), QueryInterruptedException.wrapIfNeeded(e), sqlQueryId);
    } finally {
        Thread.currentThread().setName(currThreadName);
    }
}
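Here ResourceLimitExceededException is grouped with SqlPlanningException and answered with BadQueryException.STATUS_CODE (HTTP 400), and the query id is always echoed in a response header. Below is a minimal client-side sketch of probing that behavior, not from the source: the broker address and query are hypothetical, and the literal X-Druid-SQL-Query-Id header name is an assumption matching SQL_QUERY_ID_RESPONSE_HEADER.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SqlLimitClientSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical broker address and query; adjust for your deployment.
        URL url = new URL("http://localhost:8082/druid/v2/sql/");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        String body = "{\"query\": \"SELECT channel, COUNT(*) FROM wikipedia GROUP BY channel\"}";
        try (OutputStream os = conn.getOutputStream()) {
            os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        int status = conn.getResponseCode();
        // Assumed header name; set via SQL_QUERY_ID_RESPONSE_HEADER in the handler above.
        System.out.println("X-Druid-SQL-Query-Id: " + conn.getHeaderField("X-Druid-SQL-Query-Id"));
        if (status == 400) {
            // ResourceLimitExceededException (and other BadQueryExceptions) surface here;
            // the JSON error body includes errorClass and errorMessage.
            try (InputStream err = conn.getErrorStream()) {
                if (err != null) {
                    System.err.println(new String(err.readAllBytes(), StandardCharsets.UTF_8));
                }
            }
        } else {
            try (InputStream in = conn.getInputStream()) {
                System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
            }
        }
    }
}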

Example 2 with ResourceLimitExceededException

Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io.

From class QueryResource, method doPost:

@POST
@Produces({ MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE })
@Consumes({ MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE, APPLICATION_SMILE })
public Response doPost(final InputStream in, @QueryParam("pretty") final String pretty,
// used to get request content-type, Accept header, remote address, and auth-related headers
@Context final HttpServletRequest req) throws IOException {
    final QueryLifecycle queryLifecycle = queryLifecycleFactory.factorize();
    Query<?> query = null;
    final ResourceIOReaderWriter ioReaderWriter = createResourceIOReaderWriter(req, pretty != null);
    final String currThreadName = Thread.currentThread().getName();
    try {
        queryLifecycle.initialize(readQuery(req, in, ioReaderWriter));
        query = queryLifecycle.getQuery();
        final String queryId = query.getId();
        final String queryThreadName = StringUtils.format("%s[%s_%s_%s]", currThreadName, query.getType(), query.getDataSource().getTableNames(), queryId);
        Thread.currentThread().setName(queryThreadName);
        if (log.isDebugEnabled()) {
            log.debug("Got query [%s]", query);
        }
        final Access authResult = queryLifecycle.authorize(req);
        if (!authResult.isAllowed()) {
            throw new ForbiddenException(authResult.toString());
        }
        final QueryLifecycle.QueryResponse queryResponse = queryLifecycle.execute();
        final Sequence<?> results = queryResponse.getResults();
        final ResponseContext responseContext = queryResponse.getResponseContext();
        final String prevEtag = getPreviousEtag(req);
        if (prevEtag != null && prevEtag.equals(responseContext.getEntityTag())) {
            queryLifecycle.emitLogsAndMetrics(null, req.getRemoteAddr(), -1);
            successfulQueryCount.incrementAndGet();
            return Response.notModified().build();
        }
        final Yielder<?> yielder = Yielders.each(results);
        try {
            boolean shouldFinalize = QueryContexts.isFinalize(query, true);
            boolean serializeDateTimeAsLong = QueryContexts.isSerializeDateTimeAsLong(query, false) || (!shouldFinalize && QueryContexts.isSerializeDateTimeAsLongInner(query, false));
            final ObjectWriter jsonWriter = ioReaderWriter.getResponseWriter().newOutputWriter(queryLifecycle.getToolChest(), queryLifecycle.getQuery(), serializeDateTimeAsLong);
            Response.ResponseBuilder responseBuilder = Response.ok(new StreamingOutput() {

                @Override
                public void write(OutputStream outputStream) throws WebApplicationException {
                    Exception e = null;
                    CountingOutputStream os = new CountingOutputStream(outputStream);
                    try {
                        // json serializer will always close the yielder
                        jsonWriter.writeValue(os, yielder);
                        // Some types of OutputStream suppress flush errors in the .close() method.
                        os.flush();
                        os.close();
                    } catch (Exception ex) {
                        e = ex;
                        log.noStackTrace().error(ex, "Unable to send query response.");
                        throw new RuntimeException(ex);
                    } finally {
                        Thread.currentThread().setName(currThreadName);
                        queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), os.getCount());
                        if (e == null) {
                            successfulQueryCount.incrementAndGet();
                        } else {
                            failedQueryCount.incrementAndGet();
                        }
                    }
                }
            }, ioReaderWriter.getResponseWriter().getResponseType()).header("X-Druid-Query-Id", queryId);
            transferEntityTag(responseContext, responseBuilder);
            DirectDruidClient.removeMagicResponseContextFields(responseContext);
            // Limit the response-context header, see https://github.com/apache/druid/issues/2331
            // Note that Response.ResponseBuilder.header(String key,Object value).build() calls value.toString()
            // and encodes the string using ASCII, so 1 char = 1 byte
            final ResponseContext.SerializationResult serializationResult = responseContext.serializeWith(jsonMapper, responseContextConfig.getMaxResponseContextHeaderSize());
            if (serializationResult.isTruncated()) {
                final String logToPrint = StringUtils.format("Response Context truncated for id [%s]. Full context is [%s].", queryId, serializationResult.getFullResult());
                if (responseContextConfig.shouldFailOnTruncatedResponseContext()) {
                    log.error(logToPrint);
                    throw new QueryInterruptedException(new TruncatedResponseContextException("Serialized response context exceeds the max size[%s]", responseContextConfig.getMaxResponseContextHeaderSize()), selfNode.getHostAndPortToUse());
                } else {
                    log.warn(logToPrint);
                }
            }
            return responseBuilder.header(HEADER_RESPONSE_CONTEXT, serializationResult.getResult()).build();
        } catch (QueryException e) {
            // make sure to close yielder if anything happened before starting to serialize the response.
            yielder.close();
            throw e;
        } catch (Exception e) {
            // make sure to close yielder if anything happened before starting to serialize the response.
            yielder.close();
            throw new RuntimeException(e);
        } finally {
            // do not close yielder here, since we do not want to close the yielder prior to
            // StreamingOutput having iterated over all the results
        }
    } catch (QueryInterruptedException e) {
        interruptedQueryCount.incrementAndGet();
        queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
        return ioReaderWriter.getResponseWriter().gotError(e);
    } catch (QueryTimeoutException timeout) {
        timedOutQueryCount.incrementAndGet();
        queryLifecycle.emitLogsAndMetrics(timeout, req.getRemoteAddr(), -1);
        return ioReaderWriter.getResponseWriter().gotTimeout(timeout);
    } catch (QueryCapacityExceededException cap) {
        failedQueryCount.incrementAndGet();
        queryLifecycle.emitLogsAndMetrics(cap, req.getRemoteAddr(), -1);
        return ioReaderWriter.getResponseWriter().gotLimited(cap);
    } catch (QueryUnsupportedException unsupported) {
        failedQueryCount.incrementAndGet();
        queryLifecycle.emitLogsAndMetrics(unsupported, req.getRemoteAddr(), -1);
        return ioReaderWriter.getResponseWriter().gotUnsupported(unsupported);
    } catch (BadJsonQueryException | ResourceLimitExceededException e) {
        interruptedQueryCount.incrementAndGet();
        queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
        return ioReaderWriter.getResponseWriter().gotBadQuery(e);
    } catch (ForbiddenException e) {
        // rethrow so the ForbiddenExceptionMapper can send the error response.
        throw e;
    } catch (Exception e) {
        failedQueryCount.incrementAndGet();
        queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
        log.noStackTrace().makeAlert(e, "Exception handling request").addData("query", query != null ? jsonMapper.writeValueAsString(query) : "unparseable query").addData("peer", req.getRemoteAddr()).emit();
        return ioReaderWriter.getResponseWriter().gotError(e);
    } finally {
        Thread.currentThread().setName(currThreadName);
    }
}
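The native endpoint treats ResourceLimitExceededException like BadJsonQueryException and replies through gotBadQuery, so enforcement code anywhere in the stack only needs to throw it with a descriptive message. A hedged sketch of that idiom, using the same single-String constructor seen in the examples that follow; the method and limit names are illustrative:

import org.apache.druid.query.ResourceLimitExceededException;

public class LimitCheckSketch {
    /**
     * Throws once an accumulating count passes its ceiling, mirroring the
     * checks that ultimately feed the gotBadQuery / HTTP 400 path above.
     */
    static void checkLimit(long soFar, long max, String queryId) {
        if (max > 0 && soFar > max) {
            throw new ResourceLimitExceededException(
                    "Query[" + queryId + "] exceeded configured limit [" + max + "]");
        }
    }

    public static void main(String[] args) {
        checkLimit(10, 100, "demo-query");  // fine
        checkLimit(101, 100, "demo-query"); // throws ResourceLimitExceededException
    }
}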

Example 3 with ResourceLimitExceededException

Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io.

From class DirectDruidClient, method run:

@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext context) {
    final Query<T> query = queryPlus.getQuery();
    QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    boolean isBySegment = QueryContexts.isBySegment(query);
    final JavaType queryResultType = isBySegment ? toolChest.getBySegmentResultType() : toolChest.getBaseResultType();
    final ListenableFuture<InputStream> future;
    final String url = scheme + "://" + host + "/druid/v2/";
    final String cancelUrl = url + query.getId();
    try {
        log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
        final long requestStartTimeNs = System.nanoTime();
        final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
        final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
        final AtomicLong totalBytesGathered = context.getTotalBytes();
        final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
        final boolean usingBackpressure = maxQueuedBytes > 0;
        final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {

            private final AtomicLong totalByteCount = new AtomicLong(0);

            private final AtomicLong queuedByteCount = new AtomicLong(0);

            private final AtomicLong channelSuspendedTime = new AtomicLong(0);

            private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();

            private final AtomicBoolean done = new AtomicBoolean(false);

            private final AtomicReference<String> fail = new AtomicReference<>();

            private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();

            private QueryMetrics<? super Query<T>> queryMetrics;

            private long responseStartTimeNs;

            private QueryMetrics<? super Query<T>> acquireResponseMetrics() {
                if (queryMetrics == null) {
                    queryMetrics = toolChest.makeMetrics(query);
                    queryMetrics.server(host);
                }
                return queryMetrics;
            }

            /**
             * Queue a buffer. Returns true if we should keep reading, false otherwise.
             */
            private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException {
                // Increment queuedByteCount before queueing the object, so queuedByteCount is at least as high as
                // the actual number of queued bytes at any particular time.
                final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
                final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
                queue.put(holder);
                // True if we should keep reading.
                return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
            }

            private InputStream dequeue() throws InterruptedException {
                final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
                if (holder == null) {
                    throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
                }
                final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
                if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
                    long backPressureTime = Preconditions.checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?").resume(holder.getChunkNum());
                    channelSuspendedTime.addAndGet(backPressureTime);
                }
                return holder.getStream();
            }

            @Override
            public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop) {
                trafficCopRef.set(trafficCop);
                checkQueryTimeout();
                checkTotalBytesLimit(response.getContent().readableBytes());
                log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
                responseStartTimeNs = System.nanoTime();
                acquireResponseMetrics().reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs).emit(emitter);
                final boolean continueReading;
                try {
                    log.trace("Got a response from [%s] for query ID[%s], subquery ID[%s]", url, query.getId(), query.getSubQueryId());
                    final String responseContext = response.headers().get(QueryResource.HEADER_RESPONSE_CONTEXT);
                    context.addRemainingResponse(query.getMostSpecificId(), VAL_TO_REDUCE_REMAINING_RESPONSES);
                    // responseContext may be null in case of error or query timeout
                    if (responseContext != null) {
                        context.merge(ResponseContext.deserialize(responseContext, objectMapper));
                    }
                    continueReading = enqueue(response.getContent(), 0L);
                } catch (final IOException e) {
                    log.error(e, "Error parsing response context from url [%s]", url);
                    return ClientResponse.finished(new InputStream() {

                        @Override
                        public int read() throws IOException {
                            throw e;
                        }
                    });
                } catch (InterruptedException e) {
                    log.error(e, "Queue appending interrupted");
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
                totalByteCount.addAndGet(response.getContent().readableBytes());
                return ClientResponse.finished(new SequenceInputStream(new Enumeration<InputStream>() {

                    @Override
                    public boolean hasMoreElements() {
                        if (fail.get() != null) {
                            throw new RE(fail.get());
                        }
                        checkQueryTimeout();
                        // done flips to true only after the final, empty chunk has been queued;
                        // until then the stream should be spouting good InputStreams.
                        synchronized (done) {
                            return !done.get() || !queue.isEmpty();
                        }
                    }

                    @Override
                    public InputStream nextElement() {
                        if (fail.get() != null) {
                            throw new RE(fail.get());
                        }
                        try {
                            return dequeue();
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            throw new RuntimeException(e);
                        }
                    }
                }), continueReading);
            }

            @Override
            public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk, long chunkNum) {
                checkQueryTimeout();
                final ChannelBuffer channelBuffer = chunk.getContent();
                final int bytes = channelBuffer.readableBytes();
                checkTotalBytesLimit(bytes);
                boolean continueReading = true;
                if (bytes > 0) {
                    try {
                        continueReading = enqueue(channelBuffer, chunkNum);
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    }
                    totalByteCount.addAndGet(bytes);
                }
                return ClientResponse.finished(clientResponse.getObj(), continueReading);
            }

            @Override
            public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
                long stopTimeNs = System.nanoTime();
                long nodeTimeNs = stopTimeNs - requestStartTimeNs;
                final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
                log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, totalByteCount.get(), nodeTimeMs, // Floating math; division by zero will yield Inf, not exception
                totalByteCount.get() / (0.001 * nodeTimeMs));
                QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
                responseMetrics.reportNodeTime(nodeTimeNs);
                responseMetrics.reportNodeBytes(totalByteCount.get());
                if (usingBackpressure) {
                    responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
                }
                responseMetrics.emit(emitter);
                synchronized (done) {
                    try {
                        // An empty buffer is put at the end to give SequenceInputStream.close() something to
                        // close out after done is set to true, regardless of the rest of the stream's state.
                        queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER, Long.MAX_VALUE));
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    } finally {
                        done.set(true);
                    }
                }
                return ClientResponse.finished(clientResponse.getObj());
            }

            @Override
            public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
                String msg = StringUtils.format("Query[%s] url[%s] failed with exception msg [%s]", query.getId(), url, e.getMessage());
                setupResponseReadFailure(msg, e);
            }

            private void setupResponseReadFailure(String msg, Throwable th) {
                fail.set(msg);
                queue.clear();
                queue.offer(InputStreamHolder.fromStream(new InputStream() {

                    @Override
                    public int read() throws IOException {
                        if (th != null) {
                            throw new IOException(msg, th);
                        } else {
                            throw new IOException(msg);
                        }
                    }
                }, -1, 0));
            }

            // Returns remaining timeout or throws exception if timeout already elapsed.
            private long checkQueryTimeout() {
                long timeLeft = timeoutAt - System.currentTimeMillis();
                if (timeLeft <= 0) {
                    String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
                    setupResponseReadFailure(msg, null);
                    throw new QueryTimeoutException(msg);
                } else {
                    return timeLeft;
                }
            }

            private void checkTotalBytesLimit(long bytes) {
                if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
                    String msg = StringUtils.format("Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url);
                    setupResponseReadFailure(msg, null);
                    throw new ResourceLimitExceededException(msg);
                }
            }
        };
        long timeLeft = timeoutAt - System.currentTimeMillis();
        if (timeLeft <= 0) {
            throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
        }
        future = httpClient.go(
                new Request(HttpMethod.POST, new URL(url))
                        .setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
                        .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
                responseHandler,
                Duration.millis(timeLeft));
        queryWatcher.registerQueryFuture(query, future);
        openConnections.getAndIncrement();
        Futures.addCallback(future, new FutureCallback<InputStream>() {

            @Override
            public void onSuccess(InputStream result) {
                openConnections.getAndDecrement();
            }

            @Override
            public void onFailure(Throwable t) {
                openConnections.getAndDecrement();
                if (future.isCancelled()) {
                    cancelQuery(query, cancelUrl);
                }
            }
        },
        // The callback is non-blocking and quick, so it's OK to schedule it using directExecutor()
        Execs.directExecutor());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {

        @Override
        public JsonParserIterator<T> make() {
            return new JsonParserIterator<T>(queryResultType, future, url, query, host, toolChest.decorateObjectMapper(objectMapper, query));
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake) {
            CloseableUtils.closeAndWrapExceptions(iterFromMake);
        }
    });
    // avoid the cost of de-serializing and then re-serializing again when adding to cache
    if (!isBySegment) {
        retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
    }
    return retVal;
}
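checkTotalBytesLimit enforces QueryContexts.getMaxScatterGatherBytes(query), and a positive QueryContexts.getMaxQueuedBytes(query, 0) switches on the backpressure branch (usingBackpressure). Both are read from the query context; a minimal sketch of supplying them, assuming the standard "maxScatterGatherBytes" and "maxQueuedBytes" context keys, with illustrative values:

import java.util.HashMap;
import java.util.Map;

public class ScatterGatherContextSketch {
    public static void main(String[] args) {
        Map<String, Object> context = new HashMap<>();
        // Broker-side ceiling on total bytes gathered across all data servers;
        // exceeding it surfaces as ResourceLimitExceededException via checkTotalBytesLimit.
        context.put("maxScatterGatherBytes", 100_000_000L);
        // Any positive value enables the backpressure path (usingBackpressure above).
        context.put("maxQueuedBytes", 25_000_000L);
        System.out.println(context);
    }
}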

Example 4 with ResourceLimitExceededException

Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io.

From class GroupByRowProcessor, method process:

/**
 * Process the input of sequence "rows" (output by "subquery") based on "query" and returns a {@link ResultSupplier}.
 *
 * In addition to grouping using dimensions and metrics, it will also apply filters (both DimFilter and interval
 * filters).
 *
 * The input sequence is processed synchronously with the call to this method, and result iteration happens
 * lazily upon calls to the {@link ResultSupplier}. Make sure to close it when you're done.
 */
public static ResultSupplier process(
        final GroupByQuery query,
        final GroupByQuery subquery,
        final Sequence<ResultRow> rows,
        final GroupByQueryConfig config,
        final GroupByQueryResource resource,
        final ObjectMapper spillMapper,
        final String processingTmpDir,
        final int mergeBufferSize) {
    final Closer closeOnExit = Closer.create();
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final File temporaryStorageDirectory = new File(processingTmpDir, StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
    final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
    closeOnExit.register(temporaryStorage);
    Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(query, subquery, querySpecificConfig, new Supplier<ByteBuffer>() {

        @Override
        public ByteBuffer get() {
            final ResourceHolder<ByteBuffer> mergeBufferHolder = resource.getMergeBuffer();
            closeOnExit.register(mergeBufferHolder);
            return mergeBufferHolder.get();
        }
    }, temporaryStorage, spillMapper, mergeBufferSize);
    final Grouper<RowBasedKey> grouper = pair.lhs;
    final Accumulator<AggregateResult, ResultRow> accumulator = pair.rhs;
    closeOnExit.register(grouper);
    final AggregateResult retVal = rows.accumulate(AggregateResult.ok(), accumulator);
    if (!retVal.isOk()) {
        throw new ResourceLimitExceededException(retVal.getReason());
    }
    return new ResultSupplier() {

        @Override
        public Sequence<ResultRow> results(@Nullable List<DimensionSpec> dimensionsToInclude) {
            return getRowsFromGrouper(query, grouper, dimensionsToInclude);
        }

        @Override
        public void close() throws IOException {
            closeOnExit.close();
        }
    };
}
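The javadoc's close-when-done contract fits try-with-resources, as long as iteration finishes inside the block. A hedged usage sketch, assuming ResultSupplier is GroupByRowProcessor's Closeable nested type (consistent with the close() signature above) and that a null dimensionsToInclude, permitted by @Nullable, means no dimension filtering; all argument names are placeholders:

import java.io.IOException;
import java.util.List;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.groupby.GroupByQueryConfig;
import org.apache.druid.query.groupby.ResultRow;
import org.apache.druid.query.groupby.epinephelinae.GroupByRowProcessor;
import org.apache.druid.query.groupby.resource.GroupByQueryResource;

public class ResultSupplierUsageSketch {
    static List<ResultRow> processAndCollect(
            GroupByQuery query, GroupByQuery subquery, Sequence<ResultRow> rows,
            GroupByQueryConfig config, GroupByQueryResource resource,
            ObjectMapper spillMapper, String processingTmpDir, int mergeBufferSize) throws IOException {
        try (GroupByRowProcessor.ResultSupplier supplier = GroupByRowProcessor.process(
                query, subquery, rows, config, resource, spillMapper, processingTmpDir, mergeBufferSize)) {
            // Results iterate lazily off the grouper, so consume them fully
            // before close() releases the merge buffer, grouper, and spill storage.
            return supplier.results(null).toList(); // null: no dimension filtering, per @Nullable
        }
    }
}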

Example 5 with ResourceLimitExceededException

Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io.

From class GroupByQueryHelper, method createIndexAccumulatorPair:

public static <T> Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> createIndexAccumulatorPair(final GroupByQuery query, @Nullable final GroupByQuery subquery, final GroupByQueryConfig config) {
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final Granularity gran = query.getGranularity();
    final long timeStart = query.getIntervals().get(0).getStartMillis();
    final boolean combine = subquery == null;
    long granTimeStart = timeStart;
    if (!Granularities.ALL.equals(gran)) {
        granTimeStart = gran.bucketStart(timeStart);
    }
    final List<AggregatorFactory> aggs;
    if (combine) {
        aggs = Lists.transform(query.getAggregatorSpecs(), new Function<AggregatorFactory, AggregatorFactory>() {

            @Override
            public AggregatorFactory apply(AggregatorFactory input) {
                return input.getCombiningFactory();
            }
        });
    } else {
        aggs = query.getAggregatorSpecs();
    }
    final List<String> dimensions = Lists.transform(query.getDimensions(), new Function<DimensionSpec, String>() {

        @Override
        public String apply(DimensionSpec input) {
            return input.getOutputName();
        }
    });
    final IncrementalIndex index;
    final boolean sortResults = query.getContextValue(CTX_KEY_SORT_RESULTS, true);
    // All groupBy dimensions are strings, for now.
    final List<DimensionSchema> dimensionSchemas = new ArrayList<>();
    for (DimensionSpec dimension : query.getDimensions()) {
        dimensionSchemas.add(new StringDimensionSchema(dimension.getOutputName()));
    }
    final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder().withDimensionsSpec(new DimensionsSpec(dimensionSchemas)).withMetrics(aggs.toArray(new AggregatorFactory[0])).withQueryGranularity(gran).withMinTimestamp(granTimeStart).build();
    final AppendableIndexBuilder indexBuilder;
    if (query.getContextValue("useOffheap", false)) {
        throw new UnsupportedOperationException("The 'useOffheap' option is no longer available for groupBy v1. Please move to the newer groupBy engine, " + "which always operates off-heap, by removing any custom 'druid.query.groupBy.defaultStrategy' runtime " + "properties and 'groupByStrategy' query context parameters that you have set.");
    } else {
        indexBuilder = new OnheapIncrementalIndex.Builder();
    }
    index = indexBuilder.setIndexSchema(indexSchema).setDeserializeComplexMetrics(false).setConcurrentEventAdd(true).setSortFacts(sortResults).setMaxRowCount(querySpecificConfig.getMaxResults()).build();
    Accumulator<IncrementalIndex, T> accumulator = new Accumulator<IncrementalIndex, T>() {

        @Override
        public IncrementalIndex accumulate(IncrementalIndex accumulated, T in) {
            final MapBasedRow mapBasedRow;
            if (in instanceof MapBasedRow) {
                mapBasedRow = (MapBasedRow) in;
            } else if (in instanceof ResultRow) {
                final ResultRow row = (ResultRow) in;
                mapBasedRow = row.toMapBasedRow(combine ? query : subquery);
            } else {
                throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
            }
            try {
                accumulated.add(new MapBasedInputRow(mapBasedRow.getTimestamp(), dimensions, mapBasedRow.getEvent()));
            } catch (IndexSizeExceededException e) {
                throw new ResourceLimitExceededException(e.getMessage());
            }
            return accumulated;
        }
    };
    return new Pair<>(index, accumulator);
}
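The row ceiling here is getMaxResults() from the query-specific config, and an IncrementalIndex overflow is rewrapped as ResourceLimitExceededException so groupBy v1 fails like any other limit breach. A sketch of the per-query override, assuming the standard "maxResults" context key read by GroupByQueryConfig.withOverrides; the value is illustrative, and per my understanding the override can only tighten, not raise, the server default:

import java.util.HashMap;
import java.util.Map;

public class GroupByMaxResultsContextSketch {
    public static void main(String[] args) {
        Map<String, Object> context = new HashMap<>();
        // Per-query override picked up through config.withOverrides(query); overflowing
        // this row count surfaces as ResourceLimitExceededException in the accumulator.
        context.put("maxResults", 100_000);
        System.out.println(context);
    }
}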
