Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io: class SqlResource, method doPost.
@POST
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public Response doPost(final SqlQuery sqlQuery, @Context final HttpServletRequest req) throws IOException {
  final SqlLifecycle lifecycle = sqlLifecycleFactory.factorize();
  final String sqlQueryId = lifecycle.initialize(sqlQuery.getQuery(), sqlQuery.getContext());
  final String remoteAddr = req.getRemoteAddr();
  final String currThreadName = Thread.currentThread().getName();
  try {
    Thread.currentThread().setName(StringUtils.format("sql[%s]", sqlQueryId));
    lifecycle.setParameters(sqlQuery.getParameterList());
    lifecycle.validateAndAuthorize(req);
    // must add after lifecycle is authorized
    sqlLifecycleManager.add(sqlQueryId, lifecycle);
    lifecycle.plan();
    final SqlRowTransformer rowTransformer = lifecycle.createRowTransformer();
    final Sequence<Object[]> sequence = lifecycle.execute();
    final Yielder<Object[]> yielder0 = Yielders.each(sequence);
    try {
      final Response.ResponseBuilder responseBuilder = Response.ok((StreamingOutput) outputStream -> {
        Exception e = null;
        CountingOutputStream os = new CountingOutputStream(outputStream);
        Yielder<Object[]> yielder = yielder0;
        try (final ResultFormat.Writer writer = sqlQuery.getResultFormat().createFormatter(os, jsonMapper)) {
          writer.writeResponseStart();
          if (sqlQuery.includeHeader()) {
            writer.writeHeader(rowTransformer.getRowType(), sqlQuery.includeTypesHeader(), sqlQuery.includeSqlTypesHeader());
          }
          while (!yielder.isDone()) {
            final Object[] row = yielder.get();
            writer.writeRowStart();
            for (int i = 0; i < rowTransformer.getFieldList().size(); i++) {
              final Object value = rowTransformer.transform(row, i);
              writer.writeRowField(rowTransformer.getFieldList().get(i), value);
            }
            writer.writeRowEnd();
            yielder = yielder.next(null);
          }
          writer.writeResponseEnd();
        } catch (Exception ex) {
          e = ex;
          log.error(ex, "Unable to send SQL response [%s]", sqlQueryId);
          throw new RuntimeException(ex);
        } finally {
          yielder.close();
          endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, os.getCount());
        }
      }).header(SQL_QUERY_ID_RESPONSE_HEADER, sqlQueryId);
      if (sqlQuery.includeHeader()) {
        responseBuilder.header(SQL_HEADER_RESPONSE_HEADER, SQL_HEADER_VALUE);
      }
      return responseBuilder.build();
    } catch (Throwable e) {
      // make sure to close yielder if anything happened before starting to serialize the response.
      yielder0.close();
      throw new RuntimeException(e);
    }
  } catch (QueryCapacityExceededException cap) {
    endLifecycle(sqlQueryId, lifecycle, cap, remoteAddr, -1);
    return buildNonOkResponse(QueryCapacityExceededException.STATUS_CODE, cap, sqlQueryId);
  } catch (QueryUnsupportedException unsupported) {
    endLifecycle(sqlQueryId, lifecycle, unsupported, remoteAddr, -1);
    return buildNonOkResponse(QueryUnsupportedException.STATUS_CODE, unsupported, sqlQueryId);
  } catch (QueryTimeoutException timeout) {
    endLifecycle(sqlQueryId, lifecycle, timeout, remoteAddr, -1);
    return buildNonOkResponse(QueryTimeoutException.STATUS_CODE, timeout, sqlQueryId);
  } catch (SqlPlanningException | ResourceLimitExceededException e) {
    endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
    return buildNonOkResponse(BadQueryException.STATUS_CODE, e, sqlQueryId);
  } catch (ForbiddenException e) {
    endLifecycleWithoutEmittingMetrics(sqlQueryId, lifecycle);
    // let ForbiddenExceptionMapper handle this
    throw (ForbiddenException) serverConfig.getErrorResponseTransformStrategy().transformIfNeeded(e);
  } catch (RelOptPlanner.CannotPlanException e) {
    endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
    SqlPlanningException spe = new SqlPlanningException(SqlPlanningException.PlanningError.UNSUPPORTED_SQL_ERROR, e.getMessage());
    return buildNonOkResponse(BadQueryException.STATUS_CODE, spe, sqlQueryId);
  }
  // Calcite can throw a java.lang.AssertionError, which is an Error rather than an Exception, so catching Throwable covers everything.
  catch (Throwable e) {
    log.warn(e, "Failed to handle query: %s", sqlQuery);
    endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
    return buildNonOkResponse(Status.INTERNAL_SERVER_ERROR.getStatusCode(), QueryInterruptedException.wrapIfNeeded(e), sqlQueryId);
  } finally {
    Thread.currentThread().setName(currThreadName);
  }
}
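For context, here is a minimal client-side sketch (not part of Druid) of how the error mapping above surfaces to callers: ResourceLimitExceededException and SqlPlanningException both come back as HTTP 400 (BadQueryException.STATUS_CODE) with a JSON error body, and the query id header is attached even on errors. The Router address and the literal "X-Druid-SQL-Query-Id" header value are assumptions based on common Druid defaults.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SqlErrorProbe {
  public static void main(String[] args) throws Exception {
    // Assumes a Druid Router listening on localhost:8888; adjust for your deployment.
    String body = "{\"query\": \"SELECT COUNT(*) FROM wikipedia\"}";
    HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:8888/druid/v2/sql/"))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());

    // SqlResource attaches the query id via SQL_QUERY_ID_RESPONSE_HEADER;
    // "X-Druid-SQL-Query-Id" is the assumed header name.
    System.out.println("query id: " + response.headers().firstValue("X-Druid-SQL-Query-Id").orElse("n/a"));

    if (response.statusCode() == 400) {
      // ResourceLimitExceededException and SqlPlanningException land here as "bad query" responses.
      System.out.println("bad query: " + response.body());
    } else {
      System.out.println("status " + response.statusCode() + ": " + response.body());
    }
  }
}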
Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io: class QueryResource, method doPost.
@POST
@Produces({ MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE })
@Consumes({ MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE, APPLICATION_SMILE })
public Response doPost(
    final InputStream in,
    @QueryParam("pretty") final String pretty,
    // used to get request content-type, Accept header, remote address and auth-related headers
    @Context final HttpServletRequest req
) throws IOException {
  final QueryLifecycle queryLifecycle = queryLifecycleFactory.factorize();
  Query<?> query = null;
  final ResourceIOReaderWriter ioReaderWriter = createResourceIOReaderWriter(req, pretty != null);
  final String currThreadName = Thread.currentThread().getName();
  try {
    queryLifecycle.initialize(readQuery(req, in, ioReaderWriter));
    query = queryLifecycle.getQuery();
    final String queryId = query.getId();
    final String queryThreadName = StringUtils.format("%s[%s_%s_%s]", currThreadName, query.getType(), query.getDataSource().getTableNames(), queryId);
    Thread.currentThread().setName(queryThreadName);
    if (log.isDebugEnabled()) {
      log.debug("Got query [%s]", query);
    }
    final Access authResult = queryLifecycle.authorize(req);
    if (!authResult.isAllowed()) {
      throw new ForbiddenException(authResult.toString());
    }
    final QueryLifecycle.QueryResponse queryResponse = queryLifecycle.execute();
    final Sequence<?> results = queryResponse.getResults();
    final ResponseContext responseContext = queryResponse.getResponseContext();
    final String prevEtag = getPreviousEtag(req);
    if (prevEtag != null && prevEtag.equals(responseContext.getEntityTag())) {
      queryLifecycle.emitLogsAndMetrics(null, req.getRemoteAddr(), -1);
      successfulQueryCount.incrementAndGet();
      return Response.notModified().build();
    }
    final Yielder<?> yielder = Yielders.each(results);
    try {
      boolean shouldFinalize = QueryContexts.isFinalize(query, true);
      boolean serializeDateTimeAsLong = QueryContexts.isSerializeDateTimeAsLong(query, false) || (!shouldFinalize && QueryContexts.isSerializeDateTimeAsLongInner(query, false));
      final ObjectWriter jsonWriter = ioReaderWriter.getResponseWriter().newOutputWriter(queryLifecycle.getToolChest(), queryLifecycle.getQuery(), serializeDateTimeAsLong);
      Response.ResponseBuilder responseBuilder = Response.ok(new StreamingOutput() {
        @Override
        public void write(OutputStream outputStream) throws WebApplicationException {
          Exception e = null;
          CountingOutputStream os = new CountingOutputStream(outputStream);
          try {
            // json serializer will always close the yielder
            jsonWriter.writeValue(os, yielder);
            // Some types of OutputStream suppress flush errors in the .close() method.
            os.flush();
            os.close();
          } catch (Exception ex) {
            e = ex;
            log.noStackTrace().error(ex, "Unable to send query response.");
            throw new RuntimeException(ex);
          } finally {
            Thread.currentThread().setName(currThreadName);
            queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), os.getCount());
            if (e == null) {
              successfulQueryCount.incrementAndGet();
            } else {
              failedQueryCount.incrementAndGet();
            }
          }
        }
      }, ioReaderWriter.getResponseWriter().getResponseType()).header("X-Druid-Query-Id", queryId);
      transferEntityTag(responseContext, responseBuilder);
      DirectDruidClient.removeMagicResponseContextFields(responseContext);
      // Limit the response-context header, see https://github.com/apache/druid/issues/2331
      // Note that Response.ResponseBuilder.header(String key, Object value).build() calls value.toString()
      // and encodes the string using ASCII, so 1 char is 1 byte.
      final ResponseContext.SerializationResult serializationResult = responseContext.serializeWith(jsonMapper, responseContextConfig.getMaxResponseContextHeaderSize());
      if (serializationResult.isTruncated()) {
        final String logToPrint = StringUtils.format("Response Context truncated for id [%s]. Full context is [%s].", queryId, serializationResult.getFullResult());
        if (responseContextConfig.shouldFailOnTruncatedResponseContext()) {
          log.error(logToPrint);
          throw new QueryInterruptedException(
              new TruncatedResponseContextException("Serialized response context exceeds the max size[%s]", responseContextConfig.getMaxResponseContextHeaderSize()),
              selfNode.getHostAndPortToUse()
          );
        } else {
          log.warn(logToPrint);
        }
      }
      return responseBuilder.header(HEADER_RESPONSE_CONTEXT, serializationResult.getResult()).build();
    } catch (QueryException e) {
      // make sure to close yielder if anything happened before starting to serialize the response.
      yielder.close();
      throw e;
    } catch (Exception e) {
      // make sure to close yielder if anything happened before starting to serialize the response.
      yielder.close();
      throw new RuntimeException(e);
    } finally {
      // do not close yielder here, since we do not want to close the yielder prior to
      // StreamingOutput having iterated over all the results
    }
  } catch (QueryInterruptedException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotError(e);
  } catch (QueryTimeoutException timeout) {
    timedOutQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(timeout, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotTimeout(timeout);
  } catch (QueryCapacityExceededException cap) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(cap, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotLimited(cap);
  } catch (QueryUnsupportedException unsupported) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(unsupported, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotUnsupported(unsupported);
  } catch (BadJsonQueryException | ResourceLimitExceededException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotBadQuery(e);
  } catch (ForbiddenException e) {
    // send an error response if this is thrown.
    throw e;
  } catch (Exception e) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    log.noStackTrace().makeAlert(e, "Exception handling request").addData("query", query != null ? jsonMapper.writeValueAsString(query) : "unparseable query").addData("peer", req.getRemoteAddr()).emit();
    return ioReaderWriter.getResponseWriter().gotError(e);
  } finally {
    Thread.currentThread().setName(currThreadName);
  }
}
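The streaming pattern above (count bytes while writing, then report them in the finally block) can be isolated into a small standalone sketch. Guava's CountingOutputStream is the real class used by QueryResource; the MetricsSink interface and write() helper below are placeholders, not Druid APIs.

import com.google.common.io.CountingOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public final class CountedStreamingWrite {
  // Stand-in for queryLifecycle.emitLogsAndMetrics() / endLifecycle().
  interface MetricsSink {
    void emit(Exception error, long bytesWritten);
  }

  static void write(OutputStream outputStream, Iterable<String> rows, MetricsSink metrics) {
    Exception error = null;
    CountingOutputStream os = new CountingOutputStream(outputStream);
    try {
      for (String row : rows) {
        os.write(row.getBytes(StandardCharsets.UTF_8));
        os.write('\n');
      }
      // Some OutputStream implementations swallow flush errors in close(), so flush explicitly,
      // mirroring the comment in QueryResource.
      os.flush();
      os.close();
    } catch (IOException e) {
      error = e;
      throw new RuntimeException(e);
    } finally {
      // The byte count is reported whether or not serialization succeeded;
      // the resources above pass -1 when nothing was streamed at all.
      metrics.emit(error, os.getCount());
    }
  }
}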
Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io: class DirectDruidClient, method run.
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext context) {
  final Query<T> query = queryPlus.getQuery();
  QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  boolean isBySegment = QueryContexts.isBySegment(query);
  final JavaType queryResultType = isBySegment ? toolChest.getBySegmentResultType() : toolChest.getBaseResultType();
  final ListenableFuture<InputStream> future;
  final String url = scheme + "://" + host + "/druid/v2/";
  final String cancelUrl = url + query.getId();
  try {
    log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
    final long requestStartTimeNs = System.nanoTime();
    final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
    final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
    final AtomicLong totalBytesGathered = context.getTotalBytes();
    final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
    final boolean usingBackpressure = maxQueuedBytes > 0;
    final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
      private final AtomicLong totalByteCount = new AtomicLong(0);
      private final AtomicLong queuedByteCount = new AtomicLong(0);
      private final AtomicLong channelSuspendedTime = new AtomicLong(0);
      private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();
      private final AtomicBoolean done = new AtomicBoolean(false);
      private final AtomicReference<String> fail = new AtomicReference<>();
      private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();
      private QueryMetrics<? super Query<T>> queryMetrics;
      private long responseStartTimeNs;

      private QueryMetrics<? super Query<T>> acquireResponseMetrics() {
        if (queryMetrics == null) {
          queryMetrics = toolChest.makeMetrics(query);
          queryMetrics.server(host);
        }
        return queryMetrics;
      }

      /**
       * Queue a buffer. Returns true if we should keep reading, false otherwise.
       */
      private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException {
        // Increment queuedByteCount before queueing the object, so queuedByteCount is at least as high as
        // the actual number of queued bytes at any particular time.
        final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
        final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
        queue.put(holder);
        // True if we should keep reading.
        return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
      }

      private InputStream dequeue() throws InterruptedException {
        final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
        if (holder == null) {
          throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
        }
        final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
        if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
          long backPressureTime = Preconditions.checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?").resume(holder.getChunkNum());
          channelSuspendedTime.addAndGet(backPressureTime);
        }
        return holder.getStream();
      }

      @Override
      public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop) {
        trafficCopRef.set(trafficCop);
        checkQueryTimeout();
        checkTotalBytesLimit(response.getContent().readableBytes());
        log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
        responseStartTimeNs = System.nanoTime();
        acquireResponseMetrics().reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs).emit(emitter);
        final boolean continueReading;
        try {
          log.trace("Got a response from [%s] for query ID[%s], subquery ID[%s]", url, query.getId(), query.getSubQueryId());
          final String responseContext = response.headers().get(QueryResource.HEADER_RESPONSE_CONTEXT);
          context.addRemainingResponse(query.getMostSpecificId(), VAL_TO_REDUCE_REMAINING_RESPONSES);
          // context may be null in case of error or query timeout
          if (responseContext != null) {
            context.merge(ResponseContext.deserialize(responseContext, objectMapper));
          }
          continueReading = enqueue(response.getContent(), 0L);
        } catch (final IOException e) {
          log.error(e, "Error parsing response context from url [%s]", url);
          return ClientResponse.finished(new InputStream() {
            @Override
            public int read() throws IOException {
              throw e;
            }
          });
        } catch (InterruptedException e) {
          log.error(e, "Queue appending interrupted");
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        }
        totalByteCount.addAndGet(response.getContent().readableBytes());
        return ClientResponse.finished(new SequenceInputStream(new Enumeration<InputStream>() {
          @Override
          public boolean hasMoreElements() {
            if (fail.get() != null) {
              throw new RE(fail.get());
            }
            checkQueryTimeout();
            // Then the stream should be spouting good InputStreams.
            synchronized (done) {
              return !done.get() || !queue.isEmpty();
            }
          }

          @Override
          public InputStream nextElement() {
            if (fail.get() != null) {
              throw new RE(fail.get());
            }
            try {
              return dequeue();
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              throw new RuntimeException(e);
            }
          }
        }), continueReading);
      }

      @Override
      public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk, long chunkNum) {
        checkQueryTimeout();
        final ChannelBuffer channelBuffer = chunk.getContent();
        final int bytes = channelBuffer.readableBytes();
        checkTotalBytesLimit(bytes);
        boolean continueReading = true;
        if (bytes > 0) {
          try {
            continueReading = enqueue(channelBuffer, chunkNum);
          } catch (InterruptedException e) {
            log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
          }
          totalByteCount.addAndGet(bytes);
        }
        return ClientResponse.finished(clientResponse.getObj(), continueReading);
      }

      @Override
      public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
        long stopTimeNs = System.nanoTime();
        long nodeTimeNs = stopTimeNs - requestStartTimeNs;
        final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
        log.debug(
            "Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].",
            query.getId(),
            url,
            totalByteCount.get(),
            nodeTimeMs,
            // Floating math; division by zero will yield Inf, not exception
            totalByteCount.get() / (0.001 * nodeTimeMs)
        );
        QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
        responseMetrics.reportNodeTime(nodeTimeNs);
        responseMetrics.reportNodeBytes(totalByteCount.get());
        if (usingBackpressure) {
          responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
        }
        responseMetrics.emit(emitter);
        synchronized (done) {
          try {
            // An empty byte array is put at the end to give the SequenceInputStream.close() as something to close out
            // after done is set to true, regardless of the rest of the stream's state.
            queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER, Long.MAX_VALUE));
          } catch (InterruptedException e) {
            log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
          } finally {
            done.set(true);
          }
        }
        return ClientResponse.finished(clientResponse.getObj());
      }

      @Override
      public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
        String msg = StringUtils.format("Query[%s] url[%s] failed with exception msg [%s]", query.getId(), url, e.getMessage());
        setupResponseReadFailure(msg, e);
      }

      private void setupResponseReadFailure(String msg, Throwable th) {
        fail.set(msg);
        queue.clear();
        queue.offer(InputStreamHolder.fromStream(new InputStream() {
          @Override
          public int read() throws IOException {
            if (th != null) {
              throw new IOException(msg, th);
            } else {
              throw new IOException(msg);
            }
          }
        }, -1, 0));
      }

      // Returns remaining timeout or throws exception if timeout already elapsed.
      private long checkQueryTimeout() {
        long timeLeft = timeoutAt - System.currentTimeMillis();
        if (timeLeft <= 0) {
          String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
          setupResponseReadFailure(msg, null);
          throw new QueryTimeoutException(msg);
        } else {
          return timeLeft;
        }
      }

      private void checkTotalBytesLimit(long bytes) {
        if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
          String msg = StringUtils.format("Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url);
          setupResponseReadFailure(msg, null);
          throw new ResourceLimitExceededException(msg);
        }
      }
    };
    long timeLeft = timeoutAt - System.currentTimeMillis();
    if (timeLeft <= 0) {
      throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
    }
    future = httpClient.go(
        new Request(HttpMethod.POST, new URL(url))
            .setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
            .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
        responseHandler,
        Duration.millis(timeLeft)
    );
    queryWatcher.registerQueryFuture(query, future);
    openConnections.getAndIncrement();
    Futures.addCallback(
        future,
        new FutureCallback<InputStream>() {
          @Override
          public void onSuccess(InputStream result) {
            openConnections.getAndDecrement();
          }

          @Override
          public void onFailure(Throwable t) {
            openConnections.getAndDecrement();
            if (future.isCancelled()) {
              cancelQuery(query, cancelUrl);
            }
          }
        },
        // The callback is non-blocking and quick, so it's OK to schedule it using directExecutor()
        Execs.directExecutor()
    );
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
    @Override
    public JsonParserIterator<T> make() {
      return new JsonParserIterator<T>(queryResultType, future, url, query, host, toolChest.decorateObjectMapper(objectMapper, query));
    }

    @Override
    public void cleanup(JsonParserIterator<T> iterFromMake) {
      CloseableUtils.closeAndWrapExceptions(iterFromMake);
    }
  });
  // avoid the cost of de-serializing and then re-serializing again when adding to cache
  if (!isBySegment) {
    retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
  }
  return retVal;
}
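The ResourceLimitExceededException here comes from checkTotalBytesLimit(): every per-server client participating in a query adds the bytes it reads to a single AtomicLong taken from the ResponseContext, and the first client to push the running total past maxScatterGatherBytes fails the whole query. The class below is a self-contained illustration of that accounting, not Druid code; it uses IllegalStateException in place of ResourceLimitExceededException to stay dependency-free.

import java.util.concurrent.atomic.AtomicLong;

public final class ScatterGatherByteLimiter {
  private final AtomicLong totalBytesGathered;  // shared by all per-server clients of one query
  private final long maxScatterGatherBytes;     // from QueryContexts.getMaxScatterGatherBytes(query)

  public ScatterGatherByteLimiter(AtomicLong totalBytesGathered, long maxScatterGatherBytes) {
    this.totalBytesGathered = totalBytesGathered;
    this.maxScatterGatherBytes = maxScatterGatherBytes;
  }

  // Adds bytes to the running total and throws once the configured limit is exceeded,
  // mirroring the shape of checkTotalBytesLimit() above.
  public void add(long bytes) {
    if (maxScatterGatherBytes < Long.MAX_VALUE
        && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
      throw new IllegalStateException("max scatter-gather bytes limit reached");
    }
  }

  public static void main(String[] args) {
    AtomicLong sharedTotal = new AtomicLong();  // one instance per query
    ScatterGatherByteLimiter limiter = new ScatterGatherByteLimiter(sharedTotal, 1_000L);
    limiter.add(600);    // ok: running total is 600
    try {
      limiter.add(600);  // running total would be 1200 > 1000, so this throws
    } catch (IllegalStateException e) {
      System.out.println("limit tripped: " + e.getMessage());
    }
  }
}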
Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io: class GroupByRowProcessor, method process.
/**
 * Processes the input sequence "rows" (output by "subquery") based on "query" and returns a {@link ResultSupplier}.
 *
 * In addition to grouping using dimensions and metrics, it will also apply filters (both DimFilter and interval
 * filters).
 *
 * The input sequence is processed synchronously with the call to this method, and result iteration happens lazily
 * upon calls to the {@link ResultSupplier}. Make sure to close it when you're done.
 */
public static ResultSupplier process(
    final GroupByQuery query,
    final GroupByQuery subquery,
    final Sequence<ResultRow> rows,
    final GroupByQueryConfig config,
    final GroupByQueryResource resource,
    final ObjectMapper spillMapper,
    final String processingTmpDir,
    final int mergeBufferSize
) {
  final Closer closeOnExit = Closer.create();
  final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
  final File temporaryStorageDirectory = new File(processingTmpDir, StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
  final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
  closeOnExit.register(temporaryStorage);
  Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(
      query,
      subquery,
      querySpecificConfig,
      new Supplier<ByteBuffer>() {
        @Override
        public ByteBuffer get() {
          final ResourceHolder<ByteBuffer> mergeBufferHolder = resource.getMergeBuffer();
          closeOnExit.register(mergeBufferHolder);
          return mergeBufferHolder.get();
        }
      },
      temporaryStorage,
      spillMapper,
      mergeBufferSize
  );
  final Grouper<RowBasedKey> grouper = pair.lhs;
  final Accumulator<AggregateResult, ResultRow> accumulator = pair.rhs;
  closeOnExit.register(grouper);
  final AggregateResult retVal = rows.accumulate(AggregateResult.ok(), accumulator);
  if (!retVal.isOk()) {
    throw new ResourceLimitExceededException(retVal.getReason());
  }
  return new ResultSupplier() {
    @Override
    public Sequence<ResultRow> results(@Nullable List<DimensionSpec> dimensionsToInclude) {
      return getRowsFromGrouper(query, grouper, dimensionsToInclude);
    }

    @Override
    public void close() throws IOException {
      closeOnExit.close();
    }
  };
}
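The ownership pattern above (register every resource on a Closer, then release everything only when the caller closes the returned supplier, because result iteration is lazy) can be sketched in isolation. This sketch uses Guava's com.google.common.io.Closer as a stand-in for Druid's Closer and a simplified RowSupplier in place of ResultSupplier; the names are illustrative, not Druid APIs.

import com.google.common.io.Closer;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public final class DeferredResults {
  // Simplified stand-in for ResultSupplier: lazily iterable results plus deferred cleanup.
  interface RowSupplier extends Closeable {
    Iterator<String> rows();
  }

  static RowSupplier process(List<String> input) {
    final Closer closeOnExit = Closer.create();

    // Resources acquired during processing; in GroupByRowProcessor these are the
    // LimitedTemporaryStorage, the merge-buffer holders, and the grouper itself.
    Closeable temporaryStorage = () -> System.out.println("released temporary storage");
    Closeable mergeBuffer = () -> System.out.println("released merge buffer");
    closeOnExit.register(temporaryStorage);
    closeOnExit.register(mergeBuffer);

    // Eager accumulation happens here; the real code throws ResourceLimitExceededException
    // if the accumulator reports a failed AggregateResult.
    final List<String> accumulated = new ArrayList<>(input);

    return new RowSupplier() {
      @Override
      public Iterator<String> rows() {
        return accumulated.iterator();
      }

      @Override
      public void close() throws IOException {
        // A single close releases everything registered above.
        closeOnExit.close();
      }
    };
  }

  public static void main(String[] args) throws IOException {
    try (RowSupplier supplier = process(List.of("row-1", "row-2"))) {
      supplier.rows().forEachRemaining(System.out::println);
    }
  }
}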
Use of org.apache.druid.query.ResourceLimitExceededException in project druid by druid-io: class GroupByQueryHelper, method createIndexAccumulatorPair.
public static <T> Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> createIndexAccumulatorPair(
    final GroupByQuery query,
    @Nullable final GroupByQuery subquery,
    final GroupByQueryConfig config
) {
  final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
  final Granularity gran = query.getGranularity();
  final long timeStart = query.getIntervals().get(0).getStartMillis();
  final boolean combine = subquery == null;
  long granTimeStart = timeStart;
  if (!(Granularities.ALL.equals(gran))) {
    granTimeStart = gran.bucketStart(timeStart);
  }
  final List<AggregatorFactory> aggs;
  if (combine) {
    aggs = Lists.transform(query.getAggregatorSpecs(), new Function<AggregatorFactory, AggregatorFactory>() {
      @Override
      public AggregatorFactory apply(AggregatorFactory input) {
        return input.getCombiningFactory();
      }
    });
  } else {
    aggs = query.getAggregatorSpecs();
  }
  final List<String> dimensions = Lists.transform(query.getDimensions(), new Function<DimensionSpec, String>() {
    @Override
    public String apply(DimensionSpec input) {
      return input.getOutputName();
    }
  });
  final IncrementalIndex index;
  final boolean sortResults = query.getContextValue(CTX_KEY_SORT_RESULTS, true);
  // All groupBy dimensions are strings, for now.
  final List<DimensionSchema> dimensionSchemas = new ArrayList<>();
  for (DimensionSpec dimension : query.getDimensions()) {
    dimensionSchemas.add(new StringDimensionSchema(dimension.getOutputName()));
  }
  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withDimensionsSpec(new DimensionsSpec(dimensionSchemas))
      .withMetrics(aggs.toArray(new AggregatorFactory[0]))
      .withQueryGranularity(gran)
      .withMinTimestamp(granTimeStart)
      .build();
  final AppendableIndexBuilder indexBuilder;
  if (query.getContextValue("useOffheap", false)) {
    throw new UnsupportedOperationException(
        "The 'useOffheap' option is no longer available for groupBy v1. Please move to the newer groupBy engine, "
        + "which always operates off-heap, by removing any custom 'druid.query.groupBy.defaultStrategy' runtime "
        + "properties and 'groupByStrategy' query context parameters that you have set."
    );
  } else {
    indexBuilder = new OnheapIncrementalIndex.Builder();
  }
  index = indexBuilder
      .setIndexSchema(indexSchema)
      .setDeserializeComplexMetrics(false)
      .setConcurrentEventAdd(true)
      .setSortFacts(sortResults)
      .setMaxRowCount(querySpecificConfig.getMaxResults())
      .build();
  Accumulator<IncrementalIndex, T> accumulator = new Accumulator<IncrementalIndex, T>() {
    @Override
    public IncrementalIndex accumulate(IncrementalIndex accumulated, T in) {
      final MapBasedRow mapBasedRow;
      if (in instanceof MapBasedRow) {
        mapBasedRow = (MapBasedRow) in;
      } else if (in instanceof ResultRow) {
        final ResultRow row = (ResultRow) in;
        mapBasedRow = row.toMapBasedRow(combine ? query : subquery);
      } else {
        throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
      }
      try {
        accumulated.add(new MapBasedInputRow(mapBasedRow.getTimestamp(), dimensions, mapBasedRow.getEvent()));
      } catch (IndexSizeExceededException e) {
        throw new ResourceLimitExceededException(e.getMessage());
      }
      return accumulated;
    }
  };
  return new Pair<>(index, accumulator);
}
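The pair returned above is consumed by folding the accumulator over a sequence of rows; any IndexSizeExceededException raised while adding a row has already been translated into a ResourceLimitExceededException, so callers only deal with the query-level error. Below is a dependency-free sketch of that pattern; the Accumulator interface, RowLimitedIndex, and RuntimeException stand in for the Druid types.

import java.util.ArrayList;
import java.util.List;

public final class AccumulatorSketch {
  // Stand-in for Druid's Accumulator<AccumulatedType, InType>.
  interface Accumulator<A, T> {
    A accumulate(A accumulated, T in);
  }

  // Stand-in for an IncrementalIndex with a maxResults row limit.
  static final class RowLimitedIndex {
    private final int maxRows;
    private final List<String> rows = new ArrayList<>();

    RowLimitedIndex(int maxRows) {
      this.maxRows = maxRows;
    }

    void add(String row) {
      if (rows.size() >= maxRows) {
        // In GroupByQueryHelper this is where IndexSizeExceededException is caught and rethrown
        // as ResourceLimitExceededException.
        throw new RuntimeException("maxResults reached [" + maxRows + "]");
      }
      rows.add(row);
    }

    int size() {
      return rows.size();
    }
  }

  public static void main(String[] args) {
    Accumulator<RowLimitedIndex, String> accumulator = (index, row) -> {
      index.add(row);
      return index;
    };

    RowLimitedIndex index = new RowLimitedIndex(2);
    for (String row : List.of("a", "b")) {
      index = accumulator.accumulate(index, row);
    }
    System.out.println("accumulated " + index.size() + " rows");  // accumulated 2 rows
  }
}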