
Example 6 with QueryInterruptedException

Use of io.druid.query.QueryInterruptedException in project druid by druid-io.

From the class SqlResourceTest, method testCannotValidate.

@Test
public void testCannotValidate() throws Exception {
    final QueryInterruptedException exception = doPost(new SqlQuery("SELECT dim3 FROM druid.foo", null)).lhs;
    Assert.assertNotNull(exception);
    Assert.assertEquals(QueryInterruptedException.UNKNOWN_EXCEPTION, exception.getErrorCode());
    Assert.assertEquals(ValidationException.class.getName(), exception.getErrorClass());
    Assert.assertTrue(exception.getMessage().contains("Column 'dim3' not found in any table"));
}
Also used: SqlQuery(io.druid.sql.http.SqlQuery) ValidationException(org.apache.calcite.tools.ValidationException) QueryInterruptedException(io.druid.query.QueryInterruptedException) Test(org.junit.Test)
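
To see the wrapping behavior this test asserts in isolation, here is a minimal standalone sketch. The class name is hypothetical, and a RuntimeException stands in for the Calcite ValidationException that SqlResource actually wraps; the QueryInterruptedException(Throwable) constructor and the accessors are the same ones used in the examples on this page.

import io.druid.query.QueryInterruptedException;

public class WrapUnknownExceptionSketch {
    public static void main(String[] args) {
        // Wrap an arbitrary failure, as SqlResource does for a validation error.
        final QueryInterruptedException e = new QueryInterruptedException(
            new RuntimeException("Column 'dim3' not found in any table"));
        // Unrecognized causes surface under the UNKNOWN_EXCEPTION error code.
        System.out.println(e.getErrorCode());
        // getErrorClass() reports the wrapped exception's class name.
        System.out.println(e.getErrorClass());
        // The message is carried through from the cause.
        System.out.println(e.getMessage());
    }
}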

Example 7 with QueryInterruptedException

Use of io.druid.query.QueryInterruptedException in project druid by druid-io.

From the class GroupByMergingQueryRunnerV2, method run.

@Override
public Sequence<Row> run(final Query queryParam, final Map responseContext) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    // CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION is here because realtime servers use nested mergeRunners calls
    // (one for the entire query and one for each sink). We only want the outer call to actually do merging with a
    // merge buffer, otherwise the query will allocate too many merge buffers. This is potentially sub-optimal as it
    // will involve materializing the results for each sink before starting to feed them into the outer merge buffer.
    // I'm not sure of a better way to do this without tweaking how realtime servers do queries.
    final boolean forceChainedExecution = query.getContextBoolean(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, false);
    final GroupByQuery queryForRunners = query.withOverriddenContext(ImmutableMap.<String, Object>of(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, true));
    if (BaseQuery.getContextBySegment(query, false) || forceChainedExecution) {
        return new ChainedExecutionQueryRunner(exec, queryWatcher, queryables).run(query, responseContext);
    }
    final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();
    final AggregatorFactory[] combiningAggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        combiningAggregatorFactories[i] = query.getAggregatorSpecs().get(i).getCombiningFactory();
    }
    final File temporaryStorageDirectory = new File(processingTmpDir, String.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
    final int priority = BaseQuery.getContextPriority(query, 0);
    // Figure out timeoutAt time now, so we can apply the timeout to both the mergeBufferPool.take and the actual
    // query processing together.
    final Number queryTimeout = query.getContextValue(QueryContextKeys.TIMEOUT, null);
    final long timeoutAt = queryTimeout == null ? JodaUtils.MAX_INSTANT : System.currentTimeMillis() + queryTimeout.longValue();
    return new BaseSequence<>(new BaseSequence.IteratorMaker<Row, CloseableGrouperIterator<RowBasedKey, Row>>() {

        @Override
        public CloseableGrouperIterator<RowBasedKey, Row> make() {
            final List<ReferenceCountingResourceHolder> resources = Lists.newArrayList();
            try {
                final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
                final ReferenceCountingResourceHolder<LimitedTemporaryStorage> temporaryStorageHolder = ReferenceCountingResourceHolder.fromCloseable(temporaryStorage);
                resources.add(temporaryStorageHolder);
                final ReferenceCountingResourceHolder<ByteBuffer> mergeBufferHolder;
                try {
                    // This will potentially block if there are no merge buffers left in the pool.
                    final long timeout = timeoutAt - System.currentTimeMillis();
                    if (timeout <= 0 || (mergeBufferHolder = mergeBufferPool.take(timeout)) == null) {
                        throw new TimeoutException();
                    }
                    resources.add(mergeBufferHolder);
                } catch (Exception e) {
                    throw new QueryInterruptedException(e);
                }
                Pair<Grouper<RowBasedKey>, Accumulator<Grouper<RowBasedKey>, Row>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(query, false, null, config, Suppliers.ofInstance(mergeBufferHolder.get()), concurrencyHint, temporaryStorage, spillMapper, combiningAggregatorFactories);
                final Grouper<RowBasedKey> grouper = pair.lhs;
                final Accumulator<Grouper<RowBasedKey>, Row> accumulator = pair.rhs;
                grouper.init();
                final ReferenceCountingResourceHolder<Grouper<RowBasedKey>> grouperHolder = ReferenceCountingResourceHolder.fromCloseable(grouper);
                resources.add(grouperHolder);
                ListenableFuture<List<Boolean>> futures = Futures.allAsList(Lists.newArrayList(Iterables.transform(queryables, new Function<QueryRunner<Row>, ListenableFuture<Boolean>>() {

                    @Override
                    public ListenableFuture<Boolean> apply(final QueryRunner<Row> input) {
                        if (input == null) {
                            throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                        }
                        ListenableFuture<Boolean> future = exec.submit(new AbstractPrioritizedCallable<Boolean>(priority) {

                            @Override
                            public Boolean call() throws Exception {
                                try (Releaser bufferReleaser = mergeBufferHolder.increment();
                                    Releaser grouperReleaser = grouperHolder.increment()) {
                                    final Object retVal = input.run(queryForRunners, responseContext).accumulate(grouper, accumulator);
                                    // Return true if OK, false if resources were exhausted.
                                    return retVal == grouper;
                                } catch (QueryInterruptedException e) {
                                    throw e;
                                } catch (Exception e) {
                                    log.error(e, "Exception with one of the sequences!");
                                    throw Throwables.propagate(e);
                                }
                            }
                        });
                        if (isSingleThreaded) {
                            waitForFutureCompletion(query, Futures.allAsList(ImmutableList.of(future)), timeoutAt - System.currentTimeMillis());
                        }
                        return future;
                    }
                })));
                if (!isSingleThreaded) {
                    waitForFutureCompletion(query, futures, timeoutAt - System.currentTimeMillis());
                }
                return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, new Closeable() {

                    @Override
                    public void close() throws IOException {
                        for (Closeable closeable : Lists.reverse(resources)) {
                            CloseQuietly.close(closeable);
                        }
                    }
                });
            } catch (Throwable e) {
                // Exception caught while setting up the iterator; release resources.
                for (Closeable closeable : Lists.reverse(resources)) {
                    CloseQuietly.close(closeable);
                }
                throw e;
            }
        }

        @Override
        public void cleanup(CloseableGrouperIterator<RowBasedKey, Row> iterFromMake) {
            iterFromMake.close();
        }
    });
}
Also used: Accumulator(io.druid.java.util.common.guava.Accumulator) Closeable(java.io.Closeable) ChainedExecutionQueryRunner(io.druid.query.ChainedExecutionQueryRunner) Function(com.google.common.base.Function) GroupByQuery(io.druid.query.groupby.GroupByQuery) Releaser(io.druid.collections.Releaser) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) ISE(io.druid.java.util.common.ISE) TimeoutException(java.util.concurrent.TimeoutException) QueryInterruptedException(io.druid.query.QueryInterruptedException) Pair(io.druid.java.util.common.Pair) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) RowBasedKey(io.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.RowBasedKey) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) BaseSequence(io.druid.java.util.common.guava.BaseSequence) CancellationException(java.util.concurrent.CancellationException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) QueryRunner(io.druid.query.QueryRunner) ReferenceCountingResourceHolder(io.druid.collections.ReferenceCountingResourceHolder) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Row(io.druid.data.input.Row) File(java.io.File)
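
The acquire-with-deadline logic is easy to miss inside the larger run method, so here is a minimal sketch of that pattern on its own. A plain BlockingQueue stands in for the merge-buffer pool and the class and method names are hypothetical; the deadline arithmetic and the wrapping of any failure in QueryInterruptedException mirror the runner above.

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class DeadlineTakeSketch {
    // Take a buffer from the pool, honoring an absolute deadline (timeoutAt).
    static ByteBuffer takeWithDeadline(BlockingQueue<ByteBuffer> pool, long timeoutAt) {
        try {
            final long timeout = timeoutAt - System.currentTimeMillis();
            final ByteBuffer buffer;
            // A non-positive remaining budget and an exhausted pool both count as a timeout.
            if (timeout <= 0 || (buffer = pool.poll(timeout, TimeUnit.MILLISECONDS)) == null) {
                throw new TimeoutException();
            }
            return buffer;
        } catch (Exception e) {
            // As in the runner above, acquisition failures become query-level errors.
            throw new io.druid.query.QueryInterruptedException(e);
        }
    }
}

Computing timeoutAt once, up front, lets the same budget cover both the buffer acquisition and the query processing that follows it, which is exactly the point of the comment in the original method.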

Example 8 with QueryInterruptedException

Use of io.druid.query.QueryInterruptedException in project druid by druid-io.

From the class QueryResource, method doPost.

@POST
@Produces({ MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE })
@Consumes({ MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE, APPLICATION_SMILE })
public Response doPost(InputStream in, @QueryParam("pretty") String pretty, // used to get request content-type, remote address and AuthorizationInfo
@Context final HttpServletRequest req) throws IOException {
    final long start = System.currentTimeMillis();
    Query query = null;
    QueryToolChest toolChest = null;
    String queryId = null;
    final ResponseContext context = createContext(req.getContentType(), pretty != null);
    final String currThreadName = Thread.currentThread().getName();
    try {
        query = context.getObjectMapper().readValue(in, Query.class);
        queryId = query.getId();
        if (queryId == null) {
            queryId = UUID.randomUUID().toString();
            query = query.withId(queryId);
        }
        if (query.getContextValue(QueryContextKeys.TIMEOUT) == null) {
            query = query.withOverriddenContext(ImmutableMap.of(QueryContextKeys.TIMEOUT, config.getMaxIdleTime().toStandardDuration().getMillis()));
        }
        toolChest = warehouse.getToolChest(query);
        Thread.currentThread().setName(String.format("%s[%s_%s_%s]", currThreadName, query.getType(), query.getDataSource().getNames(), queryId));
        if (log.isDebugEnabled()) {
            log.debug("Got query [%s]", query);
        }
        if (authConfig.isEnabled()) {
            // This is an experimental feature, see - https://github.com/druid-io/druid/pull/2424
            AuthorizationInfo authorizationInfo = (AuthorizationInfo) req.getAttribute(AuthConfig.DRUID_AUTH_TOKEN);
            if (authorizationInfo != null) {
                for (String dataSource : query.getDataSource().getNames()) {
                    Access authResult = authorizationInfo.isAuthorized(new Resource(dataSource, ResourceType.DATASOURCE), Action.READ);
                    if (!authResult.isAllowed()) {
                        return Response.status(Response.Status.FORBIDDEN).header("Access-Check-Result", authResult).build();
                    }
                }
            } else {
                throw new ISE("WTF?! Security is enabled but no authorization info found in the request");
            }
        }
        String prevEtag = req.getHeader(HDR_IF_NONE_MATCH);
        if (prevEtag != null) {
            query = query.withOverriddenContext(ImmutableMap.of(HDR_IF_NONE_MATCH, prevEtag));
        }
        final Map<String, Object> responseContext = new MapMaker().makeMap();
        final Sequence res = query.run(texasRanger, responseContext);
        if (prevEtag != null && prevEtag.equals(responseContext.get(HDR_ETAG))) {
            return Response.notModified().build();
        }
        final Sequence results;
        if (res == null) {
            results = Sequences.empty();
        } else {
            results = res;
        }
        final Yielder yielder = Yielders.each(results);
        try {
            final Query theQuery = query;
            final QueryToolChest theToolChest = toolChest;
            final ObjectWriter jsonWriter = context.newOutputWriter();
            Response.ResponseBuilder builder = Response.ok(new StreamingOutput() {

                @Override
                public void write(OutputStream outputStream) throws IOException, WebApplicationException {
                    try {
                        // json serializer will always close the yielder
                        CountingOutputStream os = new CountingOutputStream(outputStream);
                        jsonWriter.writeValue(os, yielder);
                        // Some types of OutputStream suppress flush errors in the .close() method.
                        os.flush();
                        os.close();
                        successfulQueryCount.incrementAndGet();
                        final long queryTime = System.currentTimeMillis() - start;
                        emitter.emit(DruidMetrics.makeQueryTimeMetric(theToolChest, jsonMapper, theQuery, req.getRemoteAddr()).setDimension("success", "true").build("query/time", queryTime));
                        emitter.emit(DruidMetrics.makeQueryTimeMetric(theToolChest, jsonMapper, theQuery, req.getRemoteAddr()).build("query/bytes", os.getCount()));
                        requestLogger.log(new RequestLogLine(new DateTime(start), req.getRemoteAddr(), theQuery, new QueryStats(ImmutableMap.<String, Object>of("query/time", queryTime, "query/bytes", os.getCount(), "success", true))));
                    } finally {
                        Thread.currentThread().setName(currThreadName);
                    }
                }
            }, context.getContentType()).header("X-Druid-Query-Id", queryId);
            if (responseContext.get(HDR_ETAG) != null) {
                builder.header(HDR_ETAG, responseContext.get(HDR_ETAG));
                responseContext.remove(HDR_ETAG);
            }
            //Limit the response-context header, see https://github.com/druid-io/druid/issues/2331
            //Note that Response.ResponseBuilder.header(String key,Object value).build() calls value.toString()
            //and encodes the string using ASCII, so 1 char is = 1 byte
            String responseCtxString = jsonMapper.writeValueAsString(responseContext);
            if (responseCtxString.length() > RESPONSE_CTX_HEADER_LEN_LIMIT) {
                log.warn("Response Context truncated for id [%s] . Full context is [%s].", queryId, responseCtxString);
                responseCtxString = responseCtxString.substring(0, RESPONSE_CTX_HEADER_LEN_LIMIT);
            }
            return builder.header("X-Druid-Response-Context", responseCtxString).build();
        } catch (Exception e) {
            // make sure to close yielder if anything happened before starting to serialize the response.
            yielder.close();
            throw Throwables.propagate(e);
        } finally {
        // do not close yielder here, since we do not want to close the yielder prior to
        // StreamingOutput having iterated over all the results
        }
    } catch (QueryInterruptedException e) {
        try {
            log.warn(e, "Exception while processing queryId [%s]", queryId);
            interruptedQueryCount.incrementAndGet();
            final long queryTime = System.currentTimeMillis() - start;
            emitter.emit(DruidMetrics.makeQueryTimeMetric(toolChest, jsonMapper, query, req.getRemoteAddr()).setDimension("success", "false").build("query/time", queryTime));
            requestLogger.log(new RequestLogLine(new DateTime(start), req.getRemoteAddr(), query, new QueryStats(ImmutableMap.<String, Object>of("query/time", queryTime, "success", false, "interrupted", true, "reason", e.toString()))));
        } catch (Exception e2) {
            log.error(e2, "Unable to log query [%s]!", query);
        }
        return context.gotError(e);
    } catch (Exception e) {
        // Input stream has already been consumed by the json object mapper if query == null
        final String queryString = query == null ? "unparsable query" : query.toString();
        log.warn(e, "Exception occurred on request [%s]", queryString);
        failedQueryCount.incrementAndGet();
        try {
            final long queryTime = System.currentTimeMillis() - start;
            emitter.emit(DruidMetrics.makeQueryTimeMetric(toolChest, jsonMapper, query, req.getRemoteAddr()).setDimension("success", "false").build("query/time", queryTime));
            requestLogger.log(new RequestLogLine(new DateTime(start), req.getRemoteAddr(), query, new QueryStats(ImmutableMap.<String, Object>of("query/time", queryTime, "success", false, "exception", e.toString()))));
        } catch (Exception e2) {
            log.error(e2, "Unable to log query [%s]!", queryString);
        }
        log.makeAlert(e, "Exception handling request").addData("exception", e.toString()).addData("query", queryString).addData("peer", req.getRemoteAddr()).emit();
        return context.gotError(e);
    } finally {
        Thread.currentThread().setName(currThreadName);
    }
}
Also used: Query(io.druid.query.Query) CountingOutputStream(com.google.common.io.CountingOutputStream) OutputStream(java.io.OutputStream) Access(io.druid.server.security.Access) StreamingOutput(javax.ws.rs.core.StreamingOutput) QueryToolChest(io.druid.query.QueryToolChest) DateTime(org.joda.time.DateTime) ISE(io.druid.java.util.common.ISE) QueryInterruptedException(io.druid.query.QueryInterruptedException) Yielder(io.druid.java.util.common.guava.Yielder) Resource(io.druid.server.security.Resource) MapMaker(com.google.common.collect.MapMaker) ObjectWriter(com.fasterxml.jackson.databind.ObjectWriter) Sequence(io.druid.java.util.common.guava.Sequence) AuthorizationInfo(io.druid.server.security.AuthorizationInfo) WebApplicationException(javax.ws.rs.WebApplicationException) IOException(java.io.IOException) Response(javax.ws.rs.core.Response) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces) Consumes(javax.ws.rs.Consumes)
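
One detail worth isolating from doPost is the X-Druid-Response-Context truncation. Below is a hedged sketch assuming only what the comments in the method state: header values are ASCII-encoded, so character length equals byte length. The 7 * 1024 figure is an illustrative assumption, not the actual value of RESPONSE_CTX_HEADER_LEN_LIMIT, and the class and method names are hypothetical.

final class ResponseContextHeaderSketch {
    // Illustrative stand-in for RESPONSE_CTX_HEADER_LEN_LIMIT; the real constant
    // lives in QueryResource and may differ.
    private static final int HEADER_LEN_LIMIT = 7 * 1024;

    static String headerSafeContext(String responseCtxString) {
        // One char is one byte under ASCII encoding, so truncating by character
        // length also bounds the header's byte size.
        return responseCtxString.length() > HEADER_LEN_LIMIT
            ? responseCtxString.substring(0, HEADER_LEN_LIMIT)
            : responseCtxString;
    }
}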

Example 9 with QueryInterruptedException

Use of io.druid.query.QueryInterruptedException in project druid by druid-io.

From the class SegmentMetadataQueryRunnerFactory, method mergeRunners.

@Override
public QueryRunner<SegmentAnalysis> mergeRunners(ExecutorService exec, Iterable<QueryRunner<SegmentAnalysis>> queryRunners) {
    final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(exec);
    return new ConcatQueryRunner<SegmentAnalysis>(Sequences.map(Sequences.simple(queryRunners), new Function<QueryRunner<SegmentAnalysis>, QueryRunner<SegmentAnalysis>>() {

        @Override
        public QueryRunner<SegmentAnalysis> apply(final QueryRunner<SegmentAnalysis> input) {
            return new QueryRunner<SegmentAnalysis>() {

                @Override
                public Sequence<SegmentAnalysis> run(final Query<SegmentAnalysis> query, final Map<String, Object> responseContext) {
                    final int priority = BaseQuery.getContextPriority(query, 0);
                    ListenableFuture<Sequence<SegmentAnalysis>> future = queryExecutor.submit(new AbstractPrioritizedCallable<Sequence<SegmentAnalysis>>(priority) {

                        @Override
                        public Sequence<SegmentAnalysis> call() throws Exception {
                            return Sequences.simple(Sequences.toList(input.run(query, responseContext), new ArrayList<SegmentAnalysis>()));
                        }
                    });
                    try {
                        queryWatcher.registerQuery(query, future);
                        final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT, (Number) null);
                        return timeout == null ? future.get() : future.get(timeout.longValue(), TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                        future.cancel(true);
                        throw new QueryInterruptedException(e);
                    } catch (CancellationException e) {
                        throw new QueryInterruptedException(e);
                    } catch (TimeoutException e) {
                        log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                        future.cancel(true);
                        throw new QueryInterruptedException(e);
                    } catch (ExecutionException e) {
                        throw Throwables.propagate(e.getCause());
                    }
                }
            };
        }
    }));
}
Also used: BaseQuery(io.druid.query.BaseQuery) SegmentMetadataQuery(io.druid.query.metadata.metadata.SegmentMetadataQuery) Query(io.druid.query.Query) Sequence(io.druid.java.util.common.guava.Sequence) QueryInterruptedException(io.druid.query.QueryInterruptedException) QueryRunner(io.druid.query.QueryRunner) ConcatQueryRunner(io.druid.query.ConcatQueryRunner) TimeoutException(java.util.concurrent.TimeoutException) CancellationException(java.util.concurrent.CancellationException) ExecutionException(java.util.concurrent.ExecutionException) Function(com.google.common.base.Function) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) SegmentAnalysis(io.druid.query.metadata.metadata.SegmentAnalysis) Map(java.util.Map)
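
The catch cascade above is the recurring shape by which these runners convert executor failures into QueryInterruptedException. A minimal sketch of that mapping against a plain Future; the helper name is hypothetical, and RuntimeException stands in for the Throwables.propagate call in the original.

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class FutureGetSketch {
    static <T> T getWithOptionalTimeout(Future<T> future, Number timeout) {
        try {
            // No timeout in the query context means wait indefinitely.
            return timeout == null ? future.get() : future.get(timeout.longValue(), TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // Cancel pending work before surfacing the interruption.
            future.cancel(true);
            throw new io.druid.query.QueryInterruptedException(e);
        } catch (CancellationException e) {
            // Already cancelled elsewhere; just translate.
            throw new io.druid.query.QueryInterruptedException(e);
        } catch (TimeoutException e) {
            future.cancel(true);
            throw new io.druid.query.QueryInterruptedException(e);
        } catch (ExecutionException e) {
            // Unwrap and rethrow the task's own failure, as the factory above does.
            throw new RuntimeException(e.getCause());
        }
    }
}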

Example 10 with QueryInterruptedException

Use of io.druid.query.QueryInterruptedException in project druid by druid-io.

From the class DirectDruidClientTest, method testQueryInterruptionExceptionLogMessage.

@Test
public void testQueryInterruptionExceptionLogMessage() throws JsonProcessingException {
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    SettableFuture<Object> interruptionFuture = SettableFuture.create();
    Capture<Request> capturedRequest = EasyMock.newCapture();
    String hostName = "localhost:8080";
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject())).andReturn(interruptionFuture).anyTimes();
    EasyMock.replay(httpClient);
    DataSegment dataSegment = new DataSegment("test", new Interval("2013-01-01/2013-01-02"), new DateTime("2013-01-01").toString(), Maps.<String, Object>newHashMap(), Lists.<String>newArrayList(), Lists.<String>newArrayList(), NoneShardSpec.instance(), 0, 0L);
    final ServerSelector serverSelector = new ServerSelector(dataSegment, new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy()));
    DirectDruidClient client1 = new DirectDruidClient(new ReflectionQueryToolChestWarehouse(), QueryRunnerTestHelper.NOOP_QUERYWATCHER, new DefaultObjectMapper(), httpClient, hostName, new NoopServiceEmitter());
    QueryableDruidServer queryableDruidServer = new QueryableDruidServer(new DruidServer("test1", hostName, 0, "historical", DruidServer.DEFAULT_TIER, 0), client1);
    serverSelector.addServerAndUpdateSegment(queryableDruidServer, dataSegment);
    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    interruptionFuture.set(new ByteArrayInputStream("{\"error\":\"testing1\",\"errorMessage\":\"testing2\"}".getBytes()));
    Sequence results = client1.run(query, context);
    QueryInterruptedException actualException = null;
    try {
        Sequences.toList(results, Lists.newArrayList());
    } catch (QueryInterruptedException e) {
        actualException = e;
    }
    Assert.assertNotNull(actualException);
    Assert.assertEquals("testing1", actualException.getErrorCode());
    Assert.assertEquals("testing2", actualException.getMessage());
    Assert.assertEquals(hostName, actualException.getHost());
    EasyMock.verify(httpClient);
}
Also used: TimeBoundaryQuery(io.druid.query.timeboundary.TimeBoundaryQuery) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) ServerSelector(io.druid.client.selector.ServerSelector) HighestPriorityTierSelectorStrategy(io.druid.client.selector.HighestPriorityTierSelectorStrategy) List(java.util.List) QueryInterruptedException(io.druid.query.QueryInterruptedException) ConnectionCountServerSelectorStrategy(io.druid.client.selector.ConnectionCountServerSelectorStrategy) Request(com.metamx.http.client.Request) NoopServiceEmitter(io.druid.server.metrics.NoopServiceEmitter) Sequence(io.druid.java.util.common.guava.Sequence) ByteArrayInputStream(java.io.ByteArrayInputStream) HttpClient(com.metamx.http.client.HttpClient) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) HttpResponseHandler(com.metamx.http.client.response.HttpResponseHandler) ReflectionQueryToolChestWarehouse(io.druid.query.ReflectionQueryToolChestWarehouse) Interval(org.joda.time.Interval) Test(org.junit.Test)
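
The test above depends on the client deserializing an error body of the form {"error": ..., "errorMessage": ...} back into a QueryInterruptedException. A hedged sketch of that round trip, assuming the class carries the Jackson annotations that the test's behavior implies; the test itself uses Druid's DefaultObjectMapper, so a plain ObjectMapper here is an assumption.

import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.query.QueryInterruptedException;

public class ErrorBodySketch {
    public static void main(String[] args) throws Exception {
        final ObjectMapper mapper = new ObjectMapper();
        // The same shape the test feeds through interruptionFuture.
        final String body = "{\"error\":\"testing1\",\"errorMessage\":\"testing2\"}";
        QueryInterruptedException e = mapper.readValue(body, QueryInterruptedException.class);
        System.out.println(e.getErrorCode()); // testing1
        System.out.println(e.getMessage());   // testing2
    }
}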

Aggregations

QueryInterruptedException (io.druid.query.QueryInterruptedException): 10 usages
ISE (io.druid.java.util.common.ISE): 4 usages
Sequence (io.druid.java.util.common.guava.Sequence): 4 usages
List (java.util.List): 4 usages
Test (org.junit.Test): 4 usages
Function (com.google.common.base.Function): 3 usages
SqlQuery (io.druid.sql.http.SqlQuery): 3 usages
TimeoutException (java.util.concurrent.TimeoutException): 3 usages
DateTime (org.joda.time.DateTime): 3 usages
HttpClient (com.metamx.http.client.HttpClient): 2 usages
Request (com.metamx.http.client.Request): 2 usages
HttpResponseHandler (com.metamx.http.client.response.HttpResponseHandler): 2 usages
ConnectionCountServerSelectorStrategy (io.druid.client.selector.ConnectionCountServerSelectorStrategy): 2 usages
HighestPriorityTierSelectorStrategy (io.druid.client.selector.HighestPriorityTierSelectorStrategy): 2 usages
QueryableDruidServer (io.druid.client.selector.QueryableDruidServer): 2 usages
ServerSelector (io.druid.client.selector.ServerSelector): 2 usages
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 2 usages
BaseSequence (io.druid.java.util.common.guava.BaseSequence): 2 usages
Query (io.druid.query.Query): 2 usages
QueryRunner (io.druid.query.QueryRunner): 2 usages