Use of org.apache.druid.query.QueryUnsupportedException in project druid by druid-io.
The class SqlResource, method doPost.
@POST
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public Response doPost(final SqlQuery sqlQuery, @Context final HttpServletRequest req) throws IOException
{
  final SqlLifecycle lifecycle = sqlLifecycleFactory.factorize();
  final String sqlQueryId = lifecycle.initialize(sqlQuery.getQuery(), sqlQuery.getContext());
  final String remoteAddr = req.getRemoteAddr();
  final String currThreadName = Thread.currentThread().getName();

  try {
    Thread.currentThread().setName(StringUtils.format("sql[%s]", sqlQueryId));
    lifecycle.setParameters(sqlQuery.getParameterList());
    lifecycle.validateAndAuthorize(req);
    // must be added after the lifecycle is authorized
    sqlLifecycleManager.add(sqlQueryId, lifecycle);
    lifecycle.plan();

    final SqlRowTransformer rowTransformer = lifecycle.createRowTransformer();
    final Sequence<Object[]> sequence = lifecycle.execute();
    final Yielder<Object[]> yielder0 = Yielders.each(sequence);

    try {
      final Response.ResponseBuilder responseBuilder = Response
          .ok((StreamingOutput) outputStream -> {
            Exception e = null;
            CountingOutputStream os = new CountingOutputStream(outputStream);
            Yielder<Object[]> yielder = yielder0;

            try (final ResultFormat.Writer writer = sqlQuery.getResultFormat().createFormatter(os, jsonMapper)) {
              writer.writeResponseStart();

              if (sqlQuery.includeHeader()) {
                writer.writeHeader(rowTransformer.getRowType(), sqlQuery.includeTypesHeader(), sqlQuery.includeSqlTypesHeader());
              }

              while (!yielder.isDone()) {
                final Object[] row = yielder.get();
                writer.writeRowStart();
                for (int i = 0; i < rowTransformer.getFieldList().size(); i++) {
                  final Object value = rowTransformer.transform(row, i);
                  writer.writeRowField(rowTransformer.getFieldList().get(i), value);
                }
                writer.writeRowEnd();
                yielder = yielder.next(null);
              }

              writer.writeResponseEnd();
            }
            catch (Exception ex) {
              e = ex;
              log.error(ex, "Unable to send SQL response [%s]", sqlQueryId);
              throw new RuntimeException(ex);
            }
            finally {
              yielder.close();
              endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, os.getCount());
            }
          })
          .header(SQL_QUERY_ID_RESPONSE_HEADER, sqlQueryId);

      if (sqlQuery.includeHeader()) {
        responseBuilder.header(SQL_HEADER_RESPONSE_HEADER, SQL_HEADER_VALUE);
      }

      return responseBuilder.build();
    }
    catch (Throwable e) {
      // make sure to close the yielder if anything happened before we started serializing the response
      yielder0.close();
      throw new RuntimeException(e);
    }
  }
  catch (QueryCapacityExceededException cap) {
    endLifecycle(sqlQueryId, lifecycle, cap, remoteAddr, -1);
    return buildNonOkResponse(QueryCapacityExceededException.STATUS_CODE, cap, sqlQueryId);
  }
  catch (QueryUnsupportedException unsupported) {
    endLifecycle(sqlQueryId, lifecycle, unsupported, remoteAddr, -1);
    return buildNonOkResponse(QueryUnsupportedException.STATUS_CODE, unsupported, sqlQueryId);
  }
  catch (QueryTimeoutException timeout) {
    endLifecycle(sqlQueryId, lifecycle, timeout, remoteAddr, -1);
    return buildNonOkResponse(QueryTimeoutException.STATUS_CODE, timeout, sqlQueryId);
  }
  catch (SqlPlanningException | ResourceLimitExceededException e) {
    endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
    return buildNonOkResponse(BadQueryException.STATUS_CODE, e, sqlQueryId);
  }
  catch (ForbiddenException e) {
    endLifecycleWithoutEmittingMetrics(sqlQueryId, lifecycle);
    // let the ForbiddenExceptionMapper handle this
    throw (ForbiddenException) serverConfig.getErrorResponseTransformStrategy().transformIfNeeded(e);
  }
  catch (RelOptPlanner.CannotPlanException e) {
    endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
    SqlPlanningException spe = new SqlPlanningException(SqlPlanningException.PlanningError.UNSUPPORTED_SQL_ERROR, e.getMessage());
    return buildNonOkResponse(BadQueryException.STATUS_CODE, spe, sqlQueryId);
  }
  // Calcite throws java.lang.AssertionError, which is an Error rather than an Exception,
  // so catch Throwable here to cover it as well.
  catch (Throwable e) {
    log.warn(e, "Failed to handle query: %s", sqlQuery);
    endLifecycle(sqlQueryId, lifecycle, e, remoteAddr, -1);
    return buildNonOkResponse(Status.INTERNAL_SERVER_ERROR.getStatusCode(), QueryInterruptedException.wrapIfNeeded(e), sqlQueryId);
  }
  finally {
    Thread.currentThread().setName(currThreadName);
  }
}
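The handler above maps each exception type to an HTTP status via buildNonOkResponse (not shown here) and attaches the query id to every response. The following is a minimal client-side sketch of how a caller might distinguish those outcomes; it assumes a Broker at localhost:8888, the SQL endpoint /druid/v2/sql/, and that SQL_QUERY_ID_RESPONSE_HEADER resolves to "X-Druid-SQL-Query-Id", so treat it as illustrative rather than definitive.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SqlErrorProbe
{
  public static void main(String[] args) throws Exception
  {
    // Hypothetical endpoint; adjust host/port for your deployment.
    final HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:8888/druid/v2/sql/"))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString("{\"query\": \"SELECT 1\"}"))
        .build();

    final HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());

    // The query id header is attached to both OK and non-OK responses above.
    final String queryId = response.headers()
        .firstValue("X-Druid-SQL-Query-Id")  // assumed value of SQL_QUERY_ID_RESPONSE_HEADER
        .orElse("unknown");

    if (response.statusCode() != 200) {
      // Non-OK bodies carry a JSON error object; e.g. QueryUnsupportedException
      // responses use QueryUnsupportedException.STATUS_CODE.
      System.err.printf("query[%s] failed with HTTP %d: %s%n", queryId, response.statusCode(), response.body());
    } else {
      System.out.printf("query[%s] rows: %s%n", queryId, response.body());
    }
  }
}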
Use of org.apache.druid.query.QueryUnsupportedException in project druid by druid-io.
The class SqlResourceTest, method testUnsupportedQueryThrowsExceptionWithFilterResponse.
@Test
public void testUnsupportedQueryThrowsExceptionWithFilterResponse() throws Exception
{
  resource = new SqlResource(
      JSON_MAPPER,
      CalciteTests.TEST_AUTHORIZER_MAPPER,
      sqlLifecycleFactory,
      lifecycleManager,
      new ServerConfig()
      {
        @Override
        public boolean isShowDetailedJettyErrors()
        {
          return true;
        }

        @Override
        public ErrorResponseTransformStrategy getErrorResponseTransformStrategy()
        {
          return new AllowedRegexErrorResponseTransformStrategy(ImmutableList.of());
        }
      }
  );

  String errorMessage = "This will be supported in Druid 9999";
  SqlQuery badQuery = EasyMock.createMock(SqlQuery.class);
  EasyMock.expect(badQuery.getQuery()).andReturn("SELECT ANSWER TO LIFE");
  EasyMock.expect(badQuery.getContext()).andReturn(ImmutableMap.of("sqlQueryId", "id"));
  EasyMock.expect(badQuery.getParameterList()).andThrow(new QueryUnsupportedException(errorMessage));
  EasyMock.replay(badQuery);

  final QueryException exception = doPost(badQuery).lhs;

  Assert.assertNotNull(exception);
  // the empty allow-list strips the message, host, and error class, but keeps the error code
  Assert.assertNull(exception.getMessage());
  Assert.assertNull(exception.getHost());
  Assert.assertEquals(QueryUnsupportedException.ERROR_CODE, exception.getErrorCode());
  Assert.assertNull(exception.getErrorClass());
  Assert.assertTrue(lifecycleManager.getAll("id").isEmpty());
}
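The assertions depend on AllowedRegexErrorResponseTransformStrategy with an empty allow-list stripping every detail field while preserving the error code. As a self-contained illustration of the allow-list idea only (ToyErrorSanitizer is hypothetical, not Druid code):

import java.util.List;
import java.util.regex.Pattern;

// Toy version of an allow-list error sanitizer; Druid's real implementation
// is AllowedRegexErrorResponseTransformStrategy.
final class ToyErrorSanitizer
{
  private final List<Pattern> allowedPatterns;

  ToyErrorSanitizer(List<Pattern> allowedPatterns)
  {
    this.allowedPatterns = allowedPatterns;
  }

  /** Returns the message unchanged if some pattern matches it, else null. */
  String sanitizeMessage(String message)
  {
    if (message != null && allowedPatterns.stream().anyMatch(p -> p.matcher(message).matches())) {
      return message;
    }
    return null;
  }
}

With an empty list, new ToyErrorSanitizer(List.of()).sanitizeMessage("anything") returns null, mirroring the Assert.assertNull(exception.getMessage()) check in the test above.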
Use of org.apache.druid.query.QueryUnsupportedException in project druid by druid-io.
The class QueryResource, method doPost.
@POST
@Produces({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE})
@Consumes({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE, APPLICATION_SMILE})
public Response doPost(
    final InputStream in,
    @QueryParam("pretty") final String pretty,
    // used to get the request content type, Accept header, remote address, and auth-related headers
    @Context final HttpServletRequest req
) throws IOException
{
  final QueryLifecycle queryLifecycle = queryLifecycleFactory.factorize();
  Query<?> query = null;

  final ResourceIOReaderWriter ioReaderWriter = createResourceIOReaderWriter(req, pretty != null);

  final String currThreadName = Thread.currentThread().getName();
  try {
    queryLifecycle.initialize(readQuery(req, in, ioReaderWriter));
    query = queryLifecycle.getQuery();
    final String queryId = query.getId();
    final String queryThreadName = StringUtils.format(
        "%s[%s_%s_%s]",
        currThreadName,
        query.getType(),
        query.getDataSource().getTableNames(),
        queryId
    );
    Thread.currentThread().setName(queryThreadName);

    if (log.isDebugEnabled()) {
      log.debug("Got query [%s]", query);
    }

    final Access authResult = queryLifecycle.authorize(req);
    if (!authResult.isAllowed()) {
      throw new ForbiddenException(authResult.toString());
    }

    final QueryLifecycle.QueryResponse queryResponse = queryLifecycle.execute();
    final Sequence<?> results = queryResponse.getResults();
    final ResponseContext responseContext = queryResponse.getResponseContext();
    final String prevEtag = getPreviousEtag(req);

    if (prevEtag != null && prevEtag.equals(responseContext.getEntityTag())) {
      queryLifecycle.emitLogsAndMetrics(null, req.getRemoteAddr(), -1);
      successfulQueryCount.incrementAndGet();
      return Response.notModified().build();
    }

    final Yielder<?> yielder = Yielders.each(results);

    try {
      boolean shouldFinalize = QueryContexts.isFinalize(query, true);
      boolean serializeDateTimeAsLong =
          QueryContexts.isSerializeDateTimeAsLong(query, false)
          || (!shouldFinalize && QueryContexts.isSerializeDateTimeAsLongInner(query, false));

      final ObjectWriter jsonWriter = ioReaderWriter.getResponseWriter().newOutputWriter(
          queryLifecycle.getToolChest(),
          queryLifecycle.getQuery(),
          serializeDateTimeAsLong
      );

      Response.ResponseBuilder responseBuilder = Response
          .ok(
              new StreamingOutput()
              {
                @Override
                public void write(OutputStream outputStream) throws WebApplicationException
                {
                  Exception e = null;

                  CountingOutputStream os = new CountingOutputStream(outputStream);
                  try {
                    // json serializer will always close the yielder
                    jsonWriter.writeValue(os, yielder);

                    // Some types of OutputStream suppress flush errors in the .close() method.
                    os.flush();
                    os.close();
                  }
                  catch (Exception ex) {
                    e = ex;
                    log.noStackTrace().error(ex, "Unable to send query response.");
                    throw new RuntimeException(ex);
                  }
                  finally {
                    Thread.currentThread().setName(currThreadName);
                    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), os.getCount());

                    if (e == null) {
                      successfulQueryCount.incrementAndGet();
                    } else {
                      failedQueryCount.incrementAndGet();
                    }
                  }
                }
              },
              ioReaderWriter.getResponseWriter().getResponseType()
          )
          .header("X-Druid-Query-Id", queryId);

      transferEntityTag(responseContext, responseBuilder);

      DirectDruidClient.removeMagicResponseContextFields(responseContext);

      // Limit the response-context header, see https://github.com/apache/druid/issues/2331
      // Note that Response.ResponseBuilder.header(String key, Object value).build() calls value.toString()
      // and encodes the string using ASCII, so 1 char = 1 byte
      final ResponseContext.SerializationResult serializationResult = responseContext.serializeWith(
          jsonMapper,
          responseContextConfig.getMaxResponseContextHeaderSize()
      );

      if (serializationResult.isTruncated()) {
        final String logToPrint = StringUtils.format(
            "Response Context truncated for id [%s]. Full context is [%s].",
            queryId,
            serializationResult.getFullResult()
        );
        if (responseContextConfig.shouldFailOnTruncatedResponseContext()) {
          log.error(logToPrint);
          throw new QueryInterruptedException(
              new TruncatedResponseContextException(
                  "Serialized response context exceeds the max size[%s]",
                  responseContextConfig.getMaxResponseContextHeaderSize()
              ),
              selfNode.getHostAndPortToUse()
          );
        } else {
          log.warn(logToPrint);
        }
      }

      return responseBuilder
          .header(HEADER_RESPONSE_CONTEXT, serializationResult.getResult())
          .build();
    }
    catch (QueryException e) {
      // make sure to close the yielder if anything happened before we started serializing the response
      yielder.close();
      throw e;
    }
    catch (Exception e) {
      // make sure to close the yielder if anything happened before we started serializing the response
      yielder.close();
      throw new RuntimeException(e);
    }
    finally {
      // do not close the yielder here, since we do not want to close it before the
      // StreamingOutput has iterated over all the results
    }
  }
  catch (QueryInterruptedException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotError(e);
  }
  catch (QueryTimeoutException timeout) {
    timedOutQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(timeout, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotTimeout(timeout);
  }
  catch (QueryCapacityExceededException cap) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(cap, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotLimited(cap);
  }
  catch (QueryUnsupportedException unsupported) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(unsupported, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotUnsupported(unsupported);
  }
  catch (BadJsonQueryException | ResourceLimitExceededException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotBadQuery(e);
  }
  catch (ForbiddenException e) {
    // rethrow and let the ForbiddenExceptionMapper send the error response
    throw e;
  }
  catch (Exception e) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);

    log.noStackTrace()
       .makeAlert(e, "Exception handling request")
       .addData("query", query != null ? jsonMapper.writeValueAsString(query) : "unparseable query")
       .addData("peer", req.getRemoteAddr())
       .emit();

    return ioReaderWriter.getResponseWriter().gotError(e);
  }
  finally {
    Thread.currentThread().setName(currThreadName);
  }
}
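A notable detail above is the entity-tag short circuit: if the client's previous etag matches the one in the response context, the server replies 304 Not Modified without streaming results. A minimal client sketch of that round trip, assuming a Broker at localhost:8888, the native endpoint /druid/v2/, a datasource named wikipedia, and that the tag travels over the standard ETag and If-None-Match headers (the header names are assumptions, not taken from the code above):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class NativeQueryEtagProbe
{
  public static void main(String[] args) throws Exception
  {
    final String body = "{\"queryType\":\"timeBoundary\",\"dataSource\":\"wikipedia\"}";
    final HttpClient client = HttpClient.newHttpClient();

    final HttpResponse<String> first = client.send(
        HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:8888/druid/v2/"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build(),
        HttpResponse.BodyHandlers.ofString()
    );

    // Assumed: transferEntityTag places the tag in a standard ETag header.
    final String etag = first.headers().firstValue("ETag").orElse(null);
    if (etag != null) {
      final HttpResponse<String> second = client.send(
          HttpRequest.newBuilder()
              .uri(URI.create("http://localhost:8888/druid/v2/"))
              .header("Content-Type", "application/json")
              .header("If-None-Match", etag)  // assumed to be what getPreviousEtag reads
              .POST(HttpRequest.BodyPublishers.ofString(body))
              .build(),
          HttpResponse.BodyHandlers.ofString()
      );
      // 304 corresponds to the Response.notModified() branch in doPost above.
      System.out.println("second status: " + second.statusCode());
    }
  }
}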
Use of org.apache.druid.query.QueryUnsupportedException in project druid by druid-io.
The class ServerManager, method getQueryRunnerForSegments.
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs)
{
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  if (factory == null) {
    final QueryUnsupportedException e =
        new QueryUnsupportedException(StringUtils.format("Unknown query type, [%s]", query.getClass()));
    log.makeAlert(e, "Error while executing a query[%s]", query.getId()).addData("dataSource", query.getDataSource()).emit();
    throw e;
  }

  final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
  final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);

  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline;
  final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> maybeTimeline =
      segmentManager.getTimeline(analysis);

  // Make sure this query type can handle the subquery, if present.
  if (analysis.isQuery() && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
    throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
  }

  if (maybeTimeline.isPresent()) {
    timeline = maybeTimeline.get();
  } else {
    return new ReportTimelineMissingSegmentQueryRunner<>(Lists.newArrayList(specs));
  }

  // segmentMapFn maps each base Segment into a joined Segment if necessary.
  final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
      analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null),
      analysis.getPreJoinableClauses(),
      cpuTimeAccumulator,
      analysis.getBaseQuery().orElse(query)
  );

  // We compute the join cache key here so it does not need to be re-computed for every segment.
  final Optional<byte[]> cacheKeyPrefix = analysis.isJoin()
      ? joinableFactoryWrapper.computeJoinDataSourceCacheKey(analysis)
      : Optional.of(StringUtils.EMPTY_BYTES);

  final FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable
      .create(specs)
      .transformCat(descriptor -> Collections.singletonList(
          buildQueryRunnerForSegment(query, descriptor, factory, toolChest, timeline, segmentMapFn, cpuTimeAccumulator, cacheKeyPrefix)
      ));

  return CPUTimeMetricQueryRunner.safeBuild(
      new FinalizeResultsQueryRunner<>(toolChest.mergeResults(factory.mergeRunners(queryProcessingPool, queryRunners)), toolChest),
      toolChest,
      emitter,
      cpuTimeAccumulator,
      true
  );
}
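The interesting piece for QueryUnsupportedException is the guard at the top: the conglomerate lookup fails fast with a typed exception that the REST layer (QueryResource above) can translate into a specific HTTP status. A self-contained sketch of the same lookup pattern, with stand-in types so it compiles on its own (ToyQuery, ToyFactory, and ToyConglomerate are hypothetical, and UnsupportedOperationException stands in for Druid's QueryUnsupportedException):

import java.util.Map;

interface ToyQuery {}
interface ToyFactory {}

final class ToyConglomerate
{
  private final Map<Class<? extends ToyQuery>, ToyFactory> factories;

  ToyConglomerate(Map<Class<? extends ToyQuery>, ToyFactory> factories)
  {
    this.factories = factories;
  }

  ToyFactory findFactoryOrThrow(ToyQuery query)
  {
    final ToyFactory factory = factories.get(query.getClass());
    if (factory == null) {
      // Mirrors ServerManager: fail fast with a typed exception rather than a generic
      // error, so a REST layer can map it to a dedicated status code.
      throw new UnsupportedOperationException("Unknown query type, [" + query.getClass() + "]");
    }
    return factory;
  }
}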
Use of org.apache.druid.query.QueryUnsupportedException in project druid by druid-io.
The class ServerManagerForQueryErrorTest, method buildQueryRunnerForSegment.
@Override
protected <T> QueryRunner<T> buildQueryRunnerForSegment(
    Query<T> query,
    SegmentDescriptor descriptor,
    QueryRunnerFactory<T, Query<T>> factory,
    QueryToolChest<T, Query<T>> toolChest,
    VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline,
    Function<SegmentReference, SegmentReference> segmentMapFn,
    AtomicLong cpuTimeAccumulator,
    Optional<byte[]> cacheKeyPrefix
)
{
  if (query.getContextBoolean(QUERY_RETRY_TEST_CONTEXT_KEY, false)) {
    final MutableBoolean isIgnoreSegment = new MutableBoolean(false);
    queryToIgnoredSegments.compute(query.getMostSpecificId(), (queryId, ignoredSegments) -> {
      if (ignoredSegments == null) {
        ignoredSegments = new HashSet<>();
      }
      if (ignoredSegments.size() < MAX_NUM_FALSE_MISSING_SEGMENTS_REPORTS) {
        ignoredSegments.add(descriptor);
        isIgnoreSegment.setTrue();
      }
      return ignoredSegments;
    });

    if (isIgnoreSegment.isTrue()) {
      LOG.info("Pretending I don't have segment[%s]", descriptor);
      return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
    }
  } else if (query.getContextBoolean(QUERY_TIMEOUT_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new QueryTimeoutException("query timeout test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new QueryTimeoutException("query timeout test");
      }
    };
  } else if (query.getContextBoolean(QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
      }
    };
  } else if (query.getContextBoolean(QUERY_UNSUPPORTED_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new QueryUnsupportedException("query unsupported test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new QueryUnsupportedException("query unsupported test");
      }
    };
  } else if (query.getContextBoolean(RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new ResourceLimitExceededException("resource limit exceeded test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new ResourceLimitExceededException("resource limit exceeded test");
      }
    };
  } else if (query.getContextBoolean(QUERY_FAILURE_TEST_CONTEXT_KEY, false)) {
    return (queryPlus, responseContext) -> new Sequence<T>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
      {
        throw new RuntimeException("query failure test");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
      {
        throw new RuntimeException("query failure test");
      }
    };
  }
  return super.buildQueryRunnerForSegment(query, descriptor, factory, toolChest, timeline, segmentMapFn, cpuTimeAccumulator, cacheKeyPrefix);
}
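The five error branches above differ only in the exception thrown from the Sequence. A possible refactor inside this test class, sketched under the assumption that the Sequence, Accumulator, YieldingAccumulator, and Yielder interfaces keep the shapes shown above (the helper name failingSequence is ours, not Druid's):

import java.util.function.Supplier;

// Builds a Sequence that throws the supplied exception on any attempt to consume it.
private static <T> Sequence<T> failingSequence(Supplier<RuntimeException> error)
{
  return new Sequence<T>()
  {
    @Override
    public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
    {
      throw error.get();
    }

    @Override
    public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
    {
      throw error.get();
    }
  };
}

Each branch would then collapse to a single line, e.g. return (queryPlus, responseContext) -> failingSequence(() -> new QueryUnsupportedException("query unsupported test"));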