Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.
From the class QueryLifecycle, the method emitLogsAndMetrics:
/**
 * Emit logs and metrics for this query.
 *
 * @param e             exception that occurred while processing this query
 * @param remoteAddress remote address, for logging; or null if unknown
 * @param bytesWritten  number of bytes written; will become a query/bytes metric if >= 0
 */
@SuppressWarnings("unchecked")
public void emitLogsAndMetrics(
    @Nullable final Throwable e,
    @Nullable final String remoteAddress,
    final long bytesWritten
)
{
  if (baseQuery == null) {
    // Never initialized, don't log or emit anything.
    return;
  }

  if (state == State.DONE) {
    log.warn("Tried to emit logs and metrics twice for query[%s]!", baseQuery.getId());
  }
  state = State.DONE;

  final boolean success = e == null;

  try {
    final long queryTimeNs = System.nanoTime() - startNs;

    QueryMetrics queryMetrics = DruidMetrics.makeRequestMetrics(
        queryMetricsFactory,
        toolChest,
        baseQuery,
        StringUtils.nullToEmptyNonDruidDataString(remoteAddress)
    );
    queryMetrics.success(success);
    queryMetrics.reportQueryTime(queryTimeNs);

    if (bytesWritten >= 0) {
      queryMetrics.reportQueryBytes(bytesWritten);
    }

    if (authenticationResult != null) {
      queryMetrics.identity(authenticationResult.getIdentity());
    }

    queryMetrics.emit(emitter);

    final Map<String, Object> statsMap = new LinkedHashMap<>();
    statsMap.put("query/time", TimeUnit.NANOSECONDS.toMillis(queryTimeNs));
    statsMap.put("query/bytes", bytesWritten);
    statsMap.put("success", success);

    if (authenticationResult != null) {
      statsMap.put("identity", authenticationResult.getIdentity());
    }

    if (e != null) {
      statsMap.put("exception", e.toString());
      if (QueryContexts.isDebug(baseQuery)) {
        log.warn(e, "Exception while processing queryId [%s]", baseQuery.getId());
      } else {
        log.noStackTrace().warn(e, "Exception while processing queryId [%s]", baseQuery.getId());
      }
      if (e instanceof QueryInterruptedException || e instanceof QueryTimeoutException) {
        // Mimic behavior from QueryResource, where this code was originally taken from.
        statsMap.put("interrupted", true);
        statsMap.put("reason", e.toString());
      }
    }

    requestLogger.logNativeQuery(
        RequestLogLine.forNative(
            baseQuery,
            DateTimes.utc(startMs),
            StringUtils.nullToEmptyNonDruidDataString(remoteAddress),
            new QueryStats(statsMap)
        )
    );
  }
  catch (Exception ex) {
    log.error(ex, "Unable to log query [%s]!", baseQuery);
  }
}
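For context, callers are expected to invoke emitLogsAndMetrics exactly once per query, after the response has been written, whether or not an exception occurred. A minimal sketch of that calling pattern follows; handleQuery and writeResults are hypothetical helpers, not part of Druid's API.

import org.apache.druid.server.QueryLifecycle;

public class QueryHandlerSketch
{
  // Hypothetical wrapper illustrating the intended calling pattern.
  void handleQuery(QueryLifecycle lifecycle, String remoteAddress)
  {
    Throwable error = null;
    long bytesWritten = -1; // query/bytes is only emitted when this is >= 0
    try {
      bytesWritten = writeResults(lifecycle); // hypothetical: stream results, return byte count
    }
    catch (RuntimeException e) {
      error = e;
      throw e;
    }
    finally {
      // A null error means success; this emits query/time, query/bytes, and the request log line.
      lifecycle.emitLogsAndMetrics(error, remoteAddress, bytesWritten);
    }
  }

  long writeResults(QueryLifecycle lifecycle)
  {
    return 0L; // stub
  }
}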
Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.
From the class JsonParserIterator, the method init:
private void init()
{
  if (jp == null) {
    try {
      long timeLeftMillis = timeoutAt - System.currentTimeMillis();
      if (checkTimeout(timeLeftMillis)) {
        throw timeoutQuery();
      }
      InputStream is = hasTimeout ? future.get(timeLeftMillis, TimeUnit.MILLISECONDS) : future.get();

      if (is != null) {
        jp = objectMapper.getFactory().createParser(is);
      } else if (checkTimeout()) {
        throw timeoutQuery();
      } else {
        // TODO: NettyHttpClient should check the actual cause of the failure and set it in the future properly.
        throw ResourceLimitExceededException.withMessage(
            "Possibly max scatter-gather bytes limit reached while reading from url[%s].",
            url
        );
      }

      final JsonToken nextToken = jp.nextToken();
      if (nextToken == JsonToken.START_ARRAY) {
        jp.nextToken();
        objectCodec = jp.getCodec();
      } else if (nextToken == JsonToken.START_OBJECT) {
        throw convertException(jp.getCodec().readValue(jp, QueryException.class));
      } else {
        throw convertException(
            new IAE("Next token wasn't a START_ARRAY, was[%s] from url[%s]", jp.getCurrentToken(), url)
        );
      }
    }
    catch (ExecutionException | CancellationException e) {
      throw convertException(e.getCause() == null ? e : e.getCause());
    }
    catch (IOException | InterruptedException e) {
      throw convertException(e);
    }
    catch (TimeoutException e) {
      throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query [%s] timed out!", queryId), host);
    }
  }
}
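The timeoutQuery() and checkTimeout() helpers referenced above are not shown in this snippet. Below is a plausible reconstruction, consistent with the TimeoutException branch at the end of init(); the exact bodies in Druid may differ.

// Plausible reconstruction of the helpers used by init(); not verbatim Druid code.
private boolean checkTimeout()
{
  return checkTimeout(timeoutAt - System.currentTimeMillis());
}

private boolean checkTimeout(long timeLeftMillis)
{
  // Only enforce the deadline when the query actually configured a timeout.
  return hasTimeout && timeLeftMillis <= 0;
}

private QueryTimeoutException timeoutQuery()
{
  // Same message/host shape as the TimeoutException branch in init().
  return new QueryTimeoutException(StringUtils.nonStrictFormat("Query [%s] timed out!", queryId), host);
}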
Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.
From the class ServerManagerForQueryErrorTest, the method buildQueryRunnerForSegment:
@Override
protected <T> QueryRunner<T> buildQueryRunnerForSegment(
    Query<T> query,
    SegmentDescriptor descriptor,
    QueryRunnerFactory<T, Query<T>> factory,
    QueryToolChest<T, Query<T>> toolChest,
    VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline,
    Function<SegmentReference, SegmentReference> segmentMapFn,
    AtomicLong cpuTimeAccumulator,
    Optional<byte[]> cacheKeyPrefix
)
{
  if (query.getContextBoolean(QUERY_RETRY_TEST_CONTEXT_KEY, false)) {
    final MutableBoolean isIgnoreSegment = new MutableBoolean(false);
    queryToIgnoredSegments.compute(query.getMostSpecificId(), (queryId, ignoredSegments) -> {
      if (ignoredSegments == null) {
        ignoredSegments = new HashSet<>();
      }
      if (ignoredSegments.size() < MAX_NUM_FALSE_MISSING_SEGMENTS_REPORTS) {
        ignoredSegments.add(descriptor);
        isIgnoreSegment.setTrue();
      }
      return ignoredSegments;
    });

    if (isIgnoreSegment.isTrue()) {
      LOG.info("Pretending I don't have segment[%s]", descriptor);
      return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
    }
  } else if (query.getContextBoolean(QUERY_TIMEOUT_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new QueryTimeoutException("query timeout test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new QueryTimeoutException("query timeout test");
      }
    };
  } else if (query.getContextBoolean(QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
      }
    };
  } else if (query.getContextBoolean(QUERY_UNSUPPORTED_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new QueryUnsupportedException("query unsupported test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new QueryUnsupportedException("query unsupported test");
      }
    };
  } else if (query.getContextBoolean(RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new ResourceLimitExceededException("resource limit exceeded test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new ResourceLimitExceededException("resource limit exceeded test");
      }
    };
  } else if (query.getContextBoolean(QUERY_FAILURE_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new RuntimeException("query failure test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new RuntimeException("query failure test");
      }
    };
  }
  return super.buildQueryRunnerForSegment(
      query,
      descriptor,
      factory,
      toolChest,
      timeline,
      segmentMapFn,
      cpuTimeAccumulator,
      cacheKeyPrefix
  );
}
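To exercise the QueryTimeoutException branch above, a test runs an otherwise ordinary query with the corresponding context flag set. A short sketch, assuming the context-key constants on ServerManagerForQueryErrorTest are accessible to the test:

// Sketch: trigger the timeout runner above via the query context.
Query<?> timeoutQuery = query.withOverriddenContext(
    ImmutableMap.of(ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY, true)
);
// Consuming the resulting Sequence then throws QueryTimeoutException("query timeout test").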
Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.
From the class ConcurrentGrouperTest, the method testGrouperTimeout:
@Test
public void testGrouperTimeout() throws Exception
{
  final ConcurrentGrouper<Long> grouper = new ConcurrentGrouper<>(
      bufferSupplier,
      TEST_RESOURCE_HOLDER,
      KEY_SERDE_FACTORY,
      KEY_SERDE_FACTORY,
      NULL_FACTORY,
      new AggregatorFactory[]{new CountAggregatorFactory("cnt")},
      1024,
      0.7f,
      1,
      new LimitedTemporaryStorage(temporaryFolder.newFolder(), 1024 * 1024),
      new DefaultObjectMapper(),
      8,
      null,
      false,
      MoreExecutors.listeningDecorator(SERVICE),
      0,
      true,
      1,
      4,
      8
  );
  grouper.init();

  final int numRows = 1000;

  Future<?>[] futures = new Future[8];
  for (int i = 0; i < 8; i++) {
    futures[i] = SERVICE.submit(new Runnable()
    {
      @Override
      public void run()
      {
        for (long row = 0; row < numRows; row++) {
          grouper.aggregate(row);
        }
      }
    });
  }

  for (Future<?> eachFuture : futures) {
    eachFuture.get();
  }

  try {
    grouper.iterator(true);
    Assert.fail("Expected a QueryTimeoutException"); // guard against the test passing silently
  } catch (RuntimeException e) {
    Assert.assertTrue(e instanceof QueryTimeoutException);
    Assert.assertEquals("Query timeout", ((QueryTimeoutException) e).getErrorCode());
  }
  grouper.close();
}
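The assertion on getErrorCode() relies on QueryTimeoutException reporting a fixed error code regardless of the message text. A minimal illustration, assuming the single-argument constructor keeps that default:

// The error code is fixed by the exception class, independent of the message text.
QueryTimeoutException timeout = new QueryTimeoutException("any message");
Assert.assertEquals("Query timeout", timeout.getErrorCode());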
Use of org.apache.druid.query.QueryTimeoutException in project druid by druid-io.
From the class DirectDruidClientTest, the method testQueryTimeoutFromFuture:
@Test
public void testQueryTimeoutFromFuture()
{
  SettableFuture<Object> noFuture = SettableFuture.create();
  Capture<Request> capturedRequest = EasyMock.newCapture();
  final String queryId = "never-ending-future";

  EasyMock.expect(
      httpClient.go(
          EasyMock.capture(capturedRequest),
          EasyMock.<HttpResponseHandler>anyObject(),
          EasyMock.anyObject(Duration.class)
      )
  ).andReturn(noFuture).anyTimes();
  EasyMock.replay(httpClient);

  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
  query = query.withOverriddenContext(
      ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, System.currentTimeMillis() + 500, "queryId", queryId)
  );

  Sequence results = client.run(QueryPlus.wrap(query));

  QueryTimeoutException actualException = null;
  try {
    results.toList();
  } catch (QueryTimeoutException e) {
    actualException = e;
  }
  Assert.assertNotNull(actualException);
  Assert.assertEquals("Query timeout", actualException.getErrorCode());
  Assert.assertEquals(StringUtils.format("Query [%s] timed out!", queryId), actualException.getMessage());
  Assert.assertEquals(hostName, actualException.getHost());
  EasyMock.verify(httpClient);
}
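Because noFuture is never set, the deadline that the client derives from the QUERY_FAIL_TIME context value expires first, producing the QueryTimeoutException asserted above. An illustrative sketch of that deadline check; this is not the actual DirectDruidClient code, and the names are illustrative:

// Illustrative sketch of the client-side deadline check; not verbatim DirectDruidClient code.
void checkQueryFailTime(long queryFailTimeMillis, String queryId, String hostName)
{
  if (System.currentTimeMillis() >= queryFailTimeMillis) {
    // Matches the error code, message, and host asserted in the test above.
    throw new QueryTimeoutException(StringUtils.format("Query [%s] timed out!", queryId), hostName);
  }
}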