Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.
In class ServerManagerTest: method testGetQueryRunnerForSegmentsWhenTimelineEntryIsMissingReportingMissingSegments.
@Test
public void testGetQueryRunnerForSegmentsWhenTimelineEntryIsMissingReportingMissingSegments() {
  final Interval interval = Intervals.of("P1d/2011-04-01");
  final SearchQuery query = searchQuery("test", interval, Granularities.ALL);
  final List<SegmentDescriptor> unknownSegments = Collections.singletonList(
      new SegmentDescriptor(interval, "unknown_version", 0)
  );
  final QueryRunner<Result<SearchResultValue>> queryRunner =
      serverManager.getQueryRunnerForSegments(query, unknownSegments);
  final ResponseContext responseContext = DefaultResponseContext.createEmpty();
  final List<Result<SearchResultValue>> results =
      queryRunner.run(QueryPlus.wrap(query), responseContext).toList();
  Assert.assertTrue(results.isEmpty());
  Assert.assertNotNull(responseContext.getMissingSegments());
  Assert.assertEquals(unknownSegments, responseContext.getMissingSegments());
}
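The pattern this test exercises — run the query, then inspect the ResponseContext for segments the server could not serve — is how a caller decides whether to retry. A minimal caller-side sketch that reuses the test's variables and only the ResponseContext calls shown above; the logging and the retry remark are illustrative, not part of the test:

// Hypothetical caller-side check, continuing the test's queryRunner and query.
final ResponseContext responseContext = DefaultResponseContext.createEmpty();
final List<Result<SearchResultValue>> results =
    queryRunner.run(QueryPlus.wrap(query), responseContext).toList();

final List<SegmentDescriptor> missing = responseContext.getMissingSegments();
if (missing != null && !missing.isEmpty()) {
  // The server had no timeline entry for these descriptors; a real client
  // (e.g. Druid's RetryQueryRunner) would re-issue the query for just these segments.
  log.warn("Query [%s] missed %d segment(s); consider retrying", query.getId(), missing.size());
}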
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.
In class QueryResource: method doPost.
@POST
@Produces({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE})
@Consumes({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE, APPLICATION_SMILE})
public Response doPost(
    final InputStream in,
    @QueryParam("pretty") final String pretty,
    // used to get request content-type, Accept header, remote address and auth-related headers
    @Context final HttpServletRequest req
) throws IOException {
  final QueryLifecycle queryLifecycle = queryLifecycleFactory.factorize();
  Query<?> query = null;
  final ResourceIOReaderWriter ioReaderWriter = createResourceIOReaderWriter(req, pretty != null);
  final String currThreadName = Thread.currentThread().getName();
  try {
    queryLifecycle.initialize(readQuery(req, in, ioReaderWriter));
    query = queryLifecycle.getQuery();
    final String queryId = query.getId();
    final String queryThreadName = StringUtils.format(
        "%s[%s_%s_%s]",
        currThreadName,
        query.getType(),
        query.getDataSource().getTableNames(),
        queryId
    );
    Thread.currentThread().setName(queryThreadName);
    if (log.isDebugEnabled()) {
      log.debug("Got query [%s]", query);
    }
    final Access authResult = queryLifecycle.authorize(req);
    if (!authResult.isAllowed()) {
      throw new ForbiddenException(authResult.toString());
    }
    final QueryLifecycle.QueryResponse queryResponse = queryLifecycle.execute();
    final Sequence<?> results = queryResponse.getResults();
    final ResponseContext responseContext = queryResponse.getResponseContext();
    final String prevEtag = getPreviousEtag(req);
    if (prevEtag != null && prevEtag.equals(responseContext.getEntityTag())) {
      queryLifecycle.emitLogsAndMetrics(null, req.getRemoteAddr(), -1);
      successfulQueryCount.incrementAndGet();
      return Response.notModified().build();
    }
    final Yielder<?> yielder = Yielders.each(results);
    try {
      boolean shouldFinalize = QueryContexts.isFinalize(query, true);
      boolean serializeDateTimeAsLong =
          QueryContexts.isSerializeDateTimeAsLong(query, false)
          || (!shouldFinalize && QueryContexts.isSerializeDateTimeAsLongInner(query, false));
      final ObjectWriter jsonWriter = ioReaderWriter.getResponseWriter().newOutputWriter(
          queryLifecycle.getToolChest(),
          queryLifecycle.getQuery(),
          serializeDateTimeAsLong
      );
      Response.ResponseBuilder responseBuilder = Response.ok(
          new StreamingOutput() {
            @Override
            public void write(OutputStream outputStream) throws WebApplicationException {
              Exception e = null;
              CountingOutputStream os = new CountingOutputStream(outputStream);
              try {
                // json serializer will always close the yielder
                jsonWriter.writeValue(os, yielder);
                // Some types of OutputStream suppress flush errors in the .close() method.
                os.flush();
                os.close();
              } catch (Exception ex) {
                e = ex;
                log.noStackTrace().error(ex, "Unable to send query response.");
                throw new RuntimeException(ex);
              } finally {
                Thread.currentThread().setName(currThreadName);
                queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), os.getCount());
                if (e == null) {
                  successfulQueryCount.incrementAndGet();
                } else {
                  failedQueryCount.incrementAndGet();
                }
              }
            }
          },
          ioReaderWriter.getResponseWriter().getResponseType()
      ).header("X-Druid-Query-Id", queryId);
      transferEntityTag(responseContext, responseBuilder);
      DirectDruidClient.removeMagicResponseContextFields(responseContext);
      // Limit the response-context header, see https://github.com/apache/druid/issues/2331
      // Note that Response.ResponseBuilder.header(String key, Object value).build() calls value.toString()
      // and encodes the string using ASCII, so 1 char = 1 byte
      final ResponseContext.SerializationResult serializationResult =
          responseContext.serializeWith(jsonMapper, responseContextConfig.getMaxResponseContextHeaderSize());
      if (serializationResult.isTruncated()) {
        final String logToPrint = StringUtils.format(
            "Response Context truncated for id [%s]. Full context is [%s].",
            queryId,
            serializationResult.getFullResult()
        );
        if (responseContextConfig.shouldFailOnTruncatedResponseContext()) {
          log.error(logToPrint);
          throw new QueryInterruptedException(
              new TruncatedResponseContextException(
                  "Serialized response context exceeds the max size[%s]",
                  responseContextConfig.getMaxResponseContextHeaderSize()
              ),
              selfNode.getHostAndPortToUse()
          );
        } else {
          log.warn(logToPrint);
        }
      }
      return responseBuilder.header(HEADER_RESPONSE_CONTEXT, serializationResult.getResult()).build();
    } catch (QueryException e) {
      // make sure to close yielder if anything happened before starting to serialize the response.
      yielder.close();
      throw e;
    } catch (Exception e) {
      // make sure to close yielder if anything happened before starting to serialize the response.
      yielder.close();
      throw new RuntimeException(e);
    } finally {
      // do not close yielder here, since we do not want to close the yielder prior to
      // StreamingOutput having iterated over all the results
    }
  } catch (QueryInterruptedException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotError(e);
  } catch (QueryTimeoutException timeout) {
    timedOutQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(timeout, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotTimeout(timeout);
  } catch (QueryCapacityExceededException cap) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(cap, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotLimited(cap);
  } catch (QueryUnsupportedException unsupported) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(unsupported, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotUnsupported(unsupported);
  } catch (BadJsonQueryException | ResourceLimitExceededException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotBadQuery(e);
  } catch (ForbiddenException e) {
    // rethrow; the exception mapper sends the error response for this.
    throw e;
  } catch (Exception e) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    log.noStackTrace()
       .makeAlert(e, "Exception handling request")
       .addData("query", query != null ? jsonMapper.writeValueAsString(query) : "unparseable query")
       .addData("peer", req.getRemoteAddr())
       .emit();
    return ioReaderWriter.getResponseWriter().gotError(e);
  } finally {
    Thread.currentThread().setName(currThreadName);
  }
}
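On the wire, the handler above surfaces the serialized response context as an HTTP header (HEADER_RESPONSE_CONTEXT, conventionally X-Druid-Response-Context) and honors If-None-Match against the context's entity tag, returning 304 when they match. A hedged client-side sketch using only the JDK's java.net.http; the broker URL, the header names, and the sample query are assumptions based on the handler above, not guaranteed constants:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DruidQueryClientSketch {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    String nativeQuery = "{\"queryType\":\"timeBoundary\",\"dataSource\":\"dummy\"}";
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:8082/druid/v2")) // assumed broker address
        .header("Content-Type", "application/json")
        // .header("If-None-Match", cachedEtag) // would exercise the 304 path in doPost
        .POST(HttpRequest.BodyPublishers.ofString(nativeQuery))
        .build();
    HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
    // doPost attaches the (possibly truncated) serialized response context as a header.
    response.headers().firstValue("X-Druid-Response-Context")
        .ifPresent(ctx -> System.out.println("response context: " + ctx));
    System.out.println("query id: " + response.headers().firstValue("X-Druid-Query-Id").orElse("?"));
    System.out.println(response.body());
  }
}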
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.
In class TimewarpOperatorTest: method testEmptyFutureInterval.
@Test
public void testEmptyFutureInterval() {
  QueryRunner<Result<TimeseriesResultValue>> queryRunner = testOperator.postProcess(
      new QueryRunner<Result<TimeseriesResultValue>>() {
        @Override
        public Sequence<Result<TimeseriesResultValue>> run(QueryPlus<Result<TimeseriesResultValue>> queryPlus, ResponseContext responseContext) {
          final Query<Result<TimeseriesResultValue>> query = queryPlus.getQuery();
          return Sequences.simple(ImmutableList.of(
              new Result<>(query.getIntervals().get(0).getStart(), new TimeseriesResultValue(ImmutableMap.of("metric", 2))),
              new Result<>(query.getIntervals().get(0).getEnd(), new TimeseriesResultValue(ImmutableMap.of("metric", 3)))
          ));
        }
      },
      DateTimes.of("2014-08-02").getMillis()
  );
  final Query<Result<TimeseriesResultValue>> query = Druids.newTimeseriesQueryBuilder()
      .dataSource("dummy")
      .intervals("2014-08-06/2014-08-08")
      .aggregators(Collections.singletonList(new CountAggregatorFactory("count")))
      .build();
  Assert.assertEquals(
      Lists.newArrayList(
          new Result<>(DateTimes.of("2014-08-02"), new TimeseriesResultValue(ImmutableMap.of("metric", 2))),
          new Result<>(DateTimes.of("2014-08-02"), new TimeseriesResultValue(ImmutableMap.of("metric", 3)))
      ),
      queryRunner.run(QueryPlus.wrap(query)).toList()
  );
}
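What the test relies on: TimewarpOperator rewrites the query's interval back into a reference data interval by whole periods, runs the inner runner, then maps result timestamps forward again, clamping anything past the warped "now" (fixed here at 2014-08-02). That is why a fully-future interval yields both rows stamped 2014-08-02. A standalone Joda-Time sketch of the period-modulo shift; the computeOffset below mirrors the idea, not Druid's exact implementation:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class TimewarpSketch {
  // Shift t back by its remainder within the current period relative to the origin,
  // a simplified stand-in for TimewarpOperator's offset computation.
  static long computeOffset(long t, long originMillis, Period period) {
    long periodMillis = period.toStandardDuration().getMillis();
    long delta = (t - originMillis) % periodMillis;
    if (delta < 0) {
      delta += periodMillis;
    }
    return -delta;
  }

  public static void main(String[] args) {
    DateTime origin = new DateTime("2014-01-06", DateTimeZone.UTC); // a Monday
    DateTime now = new DateTime("2014-08-02", DateTimeZone.UTC);
    long offset = computeOffset(now.getMillis(), origin.getMillis(), new Period("P1W"));
    // A future query interval such as 2014-08-06/2014-08-08 would be warped back
    // by a comparable per-period offset before hitting the underlying runner.
    System.out.println("warped 'now': " + new DateTime(now.getMillis() + offset, DateTimeZone.UTC));
  }
}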
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.
In class TimewarpOperatorTest: method testPostProcess.
@Test
public void testPostProcess() {
  QueryRunner<Result<TimeseriesResultValue>> queryRunner = testOperator.postProcess(
      new QueryRunner<Result<TimeseriesResultValue>>() {
        @Override
        public Sequence<Result<TimeseriesResultValue>> run(QueryPlus<Result<TimeseriesResultValue>> queryPlus, ResponseContext responseContext) {
          return Sequences.simple(ImmutableList.of(
              new Result<>(DateTimes.of("2014-01-09"), new TimeseriesResultValue(ImmutableMap.of("metric", 2))),
              new Result<>(DateTimes.of("2014-01-11"), new TimeseriesResultValue(ImmutableMap.of("metric", 3))),
              new Result<>(queryPlus.getQuery().getIntervals().get(0).getEnd(), new TimeseriesResultValue(ImmutableMap.of("metric", 5)))
          ));
        }
      },
      DateTimes.of("2014-08-02").getMillis()
  );
  final Query<Result<TimeseriesResultValue>> query = Druids.newTimeseriesQueryBuilder()
      .dataSource("dummy")
      .intervals("2014-07-31/2014-08-05")
      .aggregators(Collections.singletonList(new CountAggregatorFactory("count")))
      .build();
  Assert.assertEquals(
      Lists.newArrayList(
          new Result<>(DateTimes.of("2014-07-31"), new TimeseriesResultValue(ImmutableMap.of("metric", 2))),
          new Result<>(DateTimes.of("2014-08-02"), new TimeseriesResultValue(ImmutableMap.of("metric", 3))),
          new Result<>(DateTimes.of("2014-08-02"), new TimeseriesResultValue(ImmutableMap.of("metric", 5)))
      ),
      queryRunner.run(QueryPlus.wrap(query)).toList()
  );
  TimewarpOperator<Result<TimeBoundaryResultValue>> timeBoundaryOperator = new TimewarpOperator<>(
      new Interval(DateTimes.of("2014-01-01"), DateTimes.of("2014-01-15")),
      new Period("P1W"),
      // align on Monday
      DateTimes.of("2014-01-06")
  );
  QueryRunner<Result<TimeBoundaryResultValue>> timeBoundaryRunner = timeBoundaryOperator.postProcess(
      new QueryRunner<Result<TimeBoundaryResultValue>>() {
        @Override
        public Sequence<Result<TimeBoundaryResultValue>> run(QueryPlus<Result<TimeBoundaryResultValue>> queryPlus, ResponseContext responseContext) {
          return Sequences.simple(ImmutableList.of(
              new Result<>(DateTimes.of("2014-01-12"), new TimeBoundaryResultValue(ImmutableMap.<String, Object>of("maxTime", DateTimes.of("2014-01-12"))))
          ));
        }
      },
      DateTimes.of("2014-08-02").getMillis()
  );
  final Query<Result<TimeBoundaryResultValue>> timeBoundaryQuery = Druids.newTimeBoundaryQueryBuilder().dataSource("dummy").build();
  Assert.assertEquals(
      Collections.singletonList(
          new Result<>(DateTimes.of("2014-08-02"), new TimeBoundaryResultValue(ImmutableMap.<String, Object>of("maxTime", DateTimes.of("2014-08-02"))))
      ),
      timeBoundaryRunner.run(QueryPlus.wrap(timeBoundaryQuery)).toList()
  );
}
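The shape being tested — an operator whose postProcess wraps one QueryRunner in another with the same run(QueryPlus, ResponseContext) signature — is a plain decorator. A generic sketch of that pattern; the class name MappingQueryRunner and the result-rewriting function are made up for illustration, and only the QueryRunner/Sequence signatures visible in the tests above are assumed:

import java.util.function.Function;

// A decorator in the style of TimewarpOperator.postProcess: it wraps a delegate
// QueryRunner and rewrites each result on the way out.
public class MappingQueryRunner<T> implements QueryRunner<T> {
  private final QueryRunner<T> delegate;
  private final Function<T, T> mapper;

  public MappingQueryRunner(QueryRunner<T> delegate, Function<T, T> mapper) {
    this.delegate = delegate;
    this.mapper = mapper;
  }

  @Override
  public Sequence<T> run(QueryPlus<T> queryPlus, ResponseContext responseContext) {
    // TimewarpOperator additionally rewrites queryPlus's interval before delegating;
    // this sketch only maps the results, e.g. shifting their timestamps.
    return Sequences.map(delegate.run(queryPlus, responseContext), mapper::apply);
  }
}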
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.
In class SegmentMetadataQueryQueryToolChest: method mergeResults.
@Override
public QueryRunner<SegmentAnalysis> mergeResults(final QueryRunner<SegmentAnalysis> runner) {
  return new BySegmentSkippingQueryRunner<SegmentAnalysis>(runner) {
    @Override
    public Sequence<SegmentAnalysis> doRun(QueryRunner<SegmentAnalysis> baseRunner, QueryPlus<SegmentAnalysis> queryPlus, ResponseContext context) {
      SegmentMetadataQuery updatedQuery = ((SegmentMetadataQuery) queryPlus.getQuery()).withFinalizedAnalysisTypes(config);
      QueryPlus<SegmentAnalysis> updatedQueryPlus = queryPlus.withQuery(updatedQuery);
      return new MappedSequence<>(
          CombiningSequence.create(
              baseRunner.run(updatedQueryPlus, context),
              makeOrdering(updatedQuery),
              createMergeFn(updatedQuery)
          ),
          MERGE_TRANSFORM_FN::apply
      );
    }

    private Ordering<SegmentAnalysis> makeOrdering(SegmentMetadataQuery query) {
      return (Ordering<SegmentAnalysis>) SegmentMetadataQueryQueryToolChest.this.createResultComparator(query);
    }

    private BinaryOperator<SegmentAnalysis> createMergeFn(final SegmentMetadataQuery inQ) {
      return SegmentMetadataQueryQueryToolChest.this.createMergeFn(inQ);
    }
  };
}
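mergeResults leans on CombiningSequence: results arrive sorted by the toolchest's comparator, and adjacent entries that compare equal are folded together with the binary merge function. A self-contained plain-Java sketch of that combine-sorted-adjacent idea (Java 16+ for the local record); the eager list-based types stand in for Druid's lazy Sequences:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.function.BinaryOperator;

public class CombiningSketch {
  // Fold adjacent elements the comparator considers equal, mirroring what
  // CombiningSequence.create(sequence, ordering, mergeFn) does lazily.
  static <T> List<T> combineSorted(List<T> sorted, Comparator<T> ordering, BinaryOperator<T> mergeFn) {
    List<T> out = new ArrayList<>();
    T pending = null;
    for (T t : sorted) {
      if (pending == null) {
        pending = t;
      } else if (ordering.compare(pending, t) == 0) {
        pending = mergeFn.apply(pending, t); // same key: merge the two analyses
      } else {
        out.add(pending);
        pending = t;
      }
    }
    if (pending != null) {
      out.add(pending);
    }
    return out;
  }

  public static void main(String[] args) {
    // Toy stand-in: merge (id, count) pairs with equal ids, the way mergeResults
    // merges SegmentAnalysis rows with equal comparator keys.
    record Row(String id, long count) {}
    List<Row> rows = List.of(new Row("a", 1), new Row("a", 2), new Row("b", 5));
    List<Row> merged = combineSorted(
        rows,
        Comparator.comparing(Row::id),
        (l, r) -> new Row(l.id(), l.count() + r.count())
    );
    System.out.println(merged); // [Row[id=a, count=3], Row[id=b, count=5]]
  }
}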