Search in sources :

Example 16 with TopNQueryBuilder

use of org.apache.druid.query.topn.TopNQueryBuilder in project druid by druid-io.

In the class ClientQuerySegmentWalkerTest, the method testTopNScanMultiValue is defined as follows.

// Verifies that a topN whose datasource is a scan subquery over a multi-value column is
// planned as a cluster-level scan plus a locally-executed topN over the inlined rows.
@Test
public void testTopNScanMultiValue() {
    // Inner scan over the multi-value datasource; this is the part expected to run on the cluster.
    ScanQuery subquery = new Druids.ScanQueryBuilder().dataSource(MULTI).columns("s", "n").intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY))).legacy(false).resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST).build();
    // Outer topN: top 100 values of "s" ranked by sum of "n", reading from the subquery.
    final TopNQuery query = (TopNQuery) new TopNQueryBuilder().dataSource(new QueryDataSource(subquery)).granularity(Granularities.ALL).intervals(Intervals.ONLY_ETERNITY).dimension(DefaultDimensionSpec.of("s")).metric("sum_n").threshold(100).aggregators(new LongSumAggregatorFactory("sum_n", "n")).build().withId(DUMMY_QUERY_ID);
    testQuery(query, // TopN handles its own subqueries; only the inner one will go to the cluster.
    ImmutableList.of(ExpectedQuery.cluster(subquery.withId(DUMMY_QUERY_ID).withSubQueryId("1.1")), ExpectedQuery.local(query.withDataSource(InlineDataSource.fromIterable(ImmutableList.of(new Object[] { ImmutableList.of("a", "b"), 1 }, new Object[] { ImmutableList.of("a", "c"), 2 }, new Object[] { ImmutableList.of("b"), 3 }, new Object[] { ImmutableList.of("c"), 4 }), RowSignature.builder().add("s", null).add("n", null).build())))), ImmutableList.of(new Object[] { Intervals.ETERNITY.getStartMillis(), "c", 6L }, new Object[] { Intervals.ETERNITY.getStartMillis(), "b", 4L }, new Object[] { Intervals.ETERNITY.getStartMillis(), "a", 3L }));
    // Scheduler accounting: two queries ran in total (cluster scan + local topN),
    // but only one of them went through prioritization/laning.
    Assert.assertEquals(2, scheduler.getTotalRun().get());
    Assert.assertEquals(1, scheduler.getTotalPrioritizedAndLaned().get());
    Assert.assertEquals(2, scheduler.getTotalAcquired().get());
    Assert.assertEquals(2, scheduler.getTotalReleased().get());
}
Also used : TopNQueryBuilder(org.apache.druid.query.topn.TopNQueryBuilder) QueryDataSource(org.apache.druid.query.QueryDataSource) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) ScanQuery(org.apache.druid.query.scan.ScanQuery) TopNQuery(org.apache.druid.query.topn.TopNQuery) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) Test(org.junit.Test)

Example 17 with TopNQueryBuilder

use of org.apache.druid.query.topn.TopNQueryBuilder in project druid by druid-io.

In the class DummyStringVirtualColumnTest, the method testTopN is defined as follows.

/**
 * Runs a threshold-1 topN grouping on the virtual column {@code VSTRING_DIM} (backed by the
 * market dimension via {@link DummyStringVirtualColumn}) over the given segments, and asserts
 * that the top entry is "spot" with a count of 1674.
 *
 * @param segments                 segments to run the query against
 * @param enableRowBasedMethods    whether the virtual column exposes row-based access
 * @param enableColumnBasedMethods whether the virtual column exposes column-based access
 */
private void testTopN(List<Segment> segments, boolean enableRowBasedMethods, boolean enableColumnBasedMethods) {
    TopNQuery query = new TopNQueryBuilder()
            .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
            .granularity(Granularities.ALL)
            .dimension(VSTRING_DIM)
            .metric(COUNT)
            .threshold(1)
            .aggregators(new CountAggregatorFactory(COUNT))
            .virtualColumns(new DummyStringVirtualColumn(QueryRunnerTestHelper.MARKET_DIMENSION, VSTRING_DIM, enableRowBasedMethods, enableColumnBasedMethods, false, true))
            .intervals("2000/2030")
            .build();
    // The test helper returns an untyped sequence; localize the unavoidable unchecked cast
    // here instead of declaring a raw List and casting a second time at the assertion site.
    @SuppressWarnings("unchecked")
    List<Result<TopNResultValue>> rows = (List<Result<TopNResultValue>>) topNTestHelper.runQueryOnSegmentsObjs(segments, query).toList();
    List<Result<TopNResultValue>> expectedRows = Collections.singletonList(new Result<>(DateTimes.of("2011-01-12T00:00:00.000Z"), new TopNResultValue(Collections.<Map<String, Object>>singletonList(ImmutableMap.<String, Object>builder().put(COUNT, 1674L).put(VSTRING_DIM, "spot").build()))));
    TestHelper.assertExpectedResults(expectedRows, rows, "failed");
}
Also used : TopNQueryBuilder(org.apache.druid.query.topn.TopNQueryBuilder) TopNResultValue(org.apache.druid.query.topn.TopNResultValue) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) TopNQuery(org.apache.druid.query.topn.TopNQuery) List(java.util.List) Result(org.apache.druid.query.Result)

Example 18 with TopNQueryBuilder

use of org.apache.druid.query.topn.TopNQueryBuilder in project druid by druid-io.

In the class SchemalessTestSimpleTest, the method testFullOnTopN is defined as follows.

// @Test TODO: Handling of null values is inconsistent right now, need to make it all consistent and re-enable test
// TODO: Complain to Eric when you see this.  It shouldn't be like this...
@Ignore
@SuppressWarnings("unused")
public void testFullOnTopN() {
    // Top-3 markets by the index metric over the full interval, with max/min index
    // aggregators appended to the common aggregator set.
    TopNQuery topNQuery = new TopNQueryBuilder()
            .dataSource(dataSource)
            .granularity(ALL_GRAN)
            .dimension(marketDimension)
            .metric(indexMetric)
            .threshold(3)
            .intervals(fullOnInterval)
            .aggregators(
                    Lists.newArrayList(
                            Iterables.concat(
                                    commonAggregators,
                                    Lists.newArrayList(
                                            new DoubleMaxAggregatorFactory("maxIndex", "index"),
                                            new DoubleMinAggregatorFactory("minIndex", "index")))))
            .postAggregators(addRowsIndexConstant)
            .build();
    DimensionAndMetricValueExtractor spotRow = new DimensionAndMetricValueExtractor(
            ImmutableMap.<String, Object>builder()
                    .put("market", "spot")
                    .put("rows", 4L)
                    .put("index", 400.0D)
                    .put("addRowsIndexConstant", 405.0D)
                    .put("uniques", 1.0002442201269182D)
                    .put("maxIndex", 100.0)
                    .put("minIndex", 100.0)
                    .build());
    DimensionAndMetricValueExtractor emptyMarketRow = new DimensionAndMetricValueExtractor(
            ImmutableMap.<String, Object>builder()
                    .put("market", "")
                    .put("rows", 2L)
                    .put("index", 200.0D)
                    .put("addRowsIndexConstant", 203.0D)
                    .put("uniques", 0.0)
                    .put("maxIndex", 100.0D)
                    .put("minIndex", 100.0D)
                    .build());
    DimensionAndMetricValueExtractor totalMarketRow = new DimensionAndMetricValueExtractor(
            ImmutableMap.<String, Object>builder()
                    .put("market", "total_market")
                    .put("rows", 2L)
                    .put("index", 200.0D)
                    .put("addRowsIndexConstant", 203.0D)
                    .put("uniques", 1.0002442201269182D)
                    .put("maxIndex", 100.0D)
                    .put("minIndex", 100.0D)
                    .build());
    List<Result<TopNResultValue>> expected = Collections.singletonList(
            new Result<>(
                    DateTimes.of("2011-01-12T00:00:00.000Z"),
                    new TopNResultValue(Arrays.asList(spotRow, emptyMarketRow, totalMarketRow))));
    try (CloseableStupidPool<ByteBuffer> bufferPool = TestQueryRunners.createDefaultNonBlockingPool()) {
        QueryRunner queryRunner = TestQueryRunners.makeTopNQueryRunner(segment, bufferPool);
        TestHelper.assertExpectedResults(expected, queryRunner.run(QueryPlus.wrap(topNQuery)));
    }
}
Also used : TopNQueryBuilder(org.apache.druid.query.topn.TopNQueryBuilder) TopNResultValue(org.apache.druid.query.topn.TopNResultValue) DoubleMaxAggregatorFactory(org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory) TopNQuery(org.apache.druid.query.topn.TopNQuery) DoubleMinAggregatorFactory(org.apache.druid.query.aggregation.DoubleMinAggregatorFactory) ByteBuffer(java.nio.ByteBuffer) DimensionAndMetricValueExtractor(org.apache.druid.query.topn.DimensionAndMetricValueExtractor) QueryRunner(org.apache.druid.query.QueryRunner) Result(org.apache.druid.query.Result) Ignore(org.junit.Ignore)

Example 19 with TopNQueryBuilder

use of org.apache.druid.query.topn.TopNQueryBuilder in project druid by druid-io.

In the class JavaScriptTieredBrokerSelectorStrategyTest, the method testGetBrokerServiceName is defined as follows.

// Exercises broker-tier selection: a plain topN and a timeBoundary query get no tier,
// a three-aggregator topN is routed to the slow broker, and with no tiers configured
// the default broker service name is returned.
@Test
public void testGetBrokerServiceName() {
    final LinkedHashMap<String, String> brokersByTier = new LinkedHashMap<>();
    brokersByTier.put("fast", "druid/broker");
    brokersByTier.put("slow", "druid/slowBroker");
    final TieredBrokerConfig config = new TieredBrokerConfig() {

        @Override
        public String getDefaultBrokerServiceName() {
            return "druid/broker";
        }

        @Override
        public LinkedHashMap<String, String> getTierToBrokerMap() {
            return brokersByTier;
        }
    };
    final TopNQueryBuilder builder = new TopNQueryBuilder()
            .dataSource("test")
            .intervals("2014/2015")
            .dimension("bigdim")
            .metric("count")
            .threshold(1)
            .aggregators(new CountAggregatorFactory("count"));
    // A single-aggregator topN is not assigned to any tier.
    Assert.assertEquals(Optional.absent(), STRATEGY.getBrokerServiceName(config, builder.build()));
    // A timeBoundary query is not assigned to any tier either.
    Assert.assertEquals(Optional.absent(), STRATEGY.getBrokerServiceName(config, Druids.newTimeBoundaryQueryBuilder().dataSource("test").bound("maxTime").build()));
    // With the extra aggregators, the strategy selects the slow tier's broker.
    Assert.assertEquals(Optional.of("druid/slowBroker"), STRATEGY.getBrokerServiceName(config, builder.aggregators(new CountAggregatorFactory("count"), new LongSumAggregatorFactory("longSum", "a"), new DoubleSumAggregatorFactory("doubleSum", "b")).build()));
    // in absence of tiers, expect the default
    brokersByTier.clear();
    Assert.assertEquals(Optional.of("druid/broker"), STRATEGY.getBrokerServiceName(config, builder.aggregators(new CountAggregatorFactory("count"), new LongSumAggregatorFactory("longSum", "a"), new DoubleSumAggregatorFactory("doubleSum", "b")).build()));
}
Also used : TopNQueryBuilder(org.apache.druid.query.topn.TopNQueryBuilder) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) LinkedHashMap(java.util.LinkedHashMap) Test(org.junit.Test)

Example 20 with TopNQueryBuilder

use of org.apache.druid.query.topn.TopNQueryBuilder in project hive by apache.

In the class DruidStorageHandlerUtils, the method addDynamicFilters is defined as follows.

/**
 * Pushes the Hive filter expression down into the given Druid query by AND-ing it with any
 * filter the query already carries, preserving the query's virtual columns.
 *
 * @param query                the Druid query to augment
 * @param filterExpr           Hive filter expression to translate into a Druid filter
 * @param conf                 configuration used while translating the expression
 * @param resolveDynamicValues whether dynamic values in the expression should be resolved
 * @return a copy of the query with the combined filter, or the original query when the
 *         expression translates to no filter
 * @throws UnsupportedOperationException for query types other than timeseries/topN/groupBy/scan
 */
public static org.apache.druid.query.Query addDynamicFilters(org.apache.druid.query.Query query, ExprNodeGenericFuncDesc filterExpr, Configuration conf, boolean resolveDynamicValues) {
    final List<VirtualColumn> virtualColumns = Arrays.asList(getVirtualColumns(query).getVirtualColumns());
    final DimFilter pushedDownFilter = toDruidFilter(filterExpr, conf, virtualColumns, resolveDynamicValues);
    if (pushedDownFilter == null) {
        // The expression produced no Druid filter; return the query unchanged.
        return query;
    }
    // NOTE(review): query.getFilter() may be null when the query has no existing filter;
    // this assumes AndDimFilter tolerates a null member — confirm against the Druid version in use.
    final DimFilter combinedFilter = new AndDimFilter(pushedDownFilter, query.getFilter());
    final String queryType = query.getType();
    switch(queryType) {
        case org.apache.druid.query.Query.TIMESERIES:
            return Druids.TimeseriesQueryBuilder.copy((TimeseriesQuery) query)
                    .filters(combinedFilter)
                    .virtualColumns(VirtualColumns.create(virtualColumns))
                    .build();
        case org.apache.druid.query.Query.TOPN:
            return new TopNQueryBuilder((TopNQuery) query)
                    .filters(combinedFilter)
                    .virtualColumns(VirtualColumns.create(virtualColumns))
                    .build();
        case org.apache.druid.query.Query.GROUP_BY:
            return new GroupByQuery.Builder((GroupByQuery) query)
                    .setDimFilter(combinedFilter)
                    .setVirtualColumns(VirtualColumns.create(virtualColumns))
                    .build();
        case org.apache.druid.query.Query.SCAN:
            return Druids.ScanQueryBuilder.copy((ScanQuery) query)
                    .filters(combinedFilter)
                    .virtualColumns(VirtualColumns.create(virtualColumns))
                    .build();
        default:
            throw new UnsupportedOperationException("Unsupported Query type " + queryType);
    }
}
Also used : TopNQueryBuilder(org.apache.druid.query.topn.TopNQueryBuilder) AndDimFilter(org.apache.druid.query.filter.AndDimFilter) TopNQueryBuilder(org.apache.druid.query.topn.TopNQueryBuilder) GenericUDFToString(org.apache.hadoop.hive.ql.udf.generic.GenericUDFToString) VirtualColumn(org.apache.druid.segment.VirtualColumn) ExpressionVirtualColumn(org.apache.druid.segment.virtual.ExpressionVirtualColumn) AndDimFilter(org.apache.druid.query.filter.AndDimFilter) DimFilter(org.apache.druid.query.filter.DimFilter) BoundDimFilter(org.apache.druid.query.filter.BoundDimFilter) BloomDimFilter(org.apache.druid.query.filter.BloomDimFilter) OrDimFilter(org.apache.druid.query.filter.OrDimFilter)

Aggregations

TopNQueryBuilder (org.apache.druid.query.topn.TopNQueryBuilder)55 Test (org.junit.Test)44 TopNQuery (org.apache.druid.query.topn.TopNQuery)25 DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec)22 LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory)16 InvertedTopNMetricSpec (org.apache.druid.query.topn.InvertedTopNMetricSpec)14 CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory)13 TopNQueryQueryToolChest (org.apache.druid.query.topn.TopNQueryQueryToolChest)13 Result (org.apache.druid.query.Result)12 TopNQueryConfig (org.apache.druid.query.topn.TopNQueryConfig)12 TopNResultValue (org.apache.druid.query.topn.TopNResultValue)11 DoubleMinAggregatorFactory (org.apache.druid.query.aggregation.DoubleMinAggregatorFactory)9 DimensionTopNMetricSpec (org.apache.druid.query.topn.DimensionTopNMetricSpec)9 NumericTopNMetricSpec (org.apache.druid.query.topn.NumericTopNMetricSpec)9 DoubleMaxAggregatorFactory (org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory)7 InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest)7 ByteBuffer (java.nio.ByteBuffer)5 QueryRunner (org.apache.druid.query.QueryRunner)5 MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec)5 HashMap (java.util.HashMap)4