
Example 96 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From the class DruidSchemaTest, the method testLocalSegmentCacheSetsDataSourceAsGlobalAndJoinable:

@Test
public void testLocalSegmentCacheSetsDataSourceAsGlobalAndJoinable() throws InterruptedException {
    DruidTable fooTable = (DruidTable) schema.getTableMap().get("foo");
    Assert.assertNotNull(fooTable);
    Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
    Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
    Assert.assertFalse(fooTable.isJoinable());
    Assert.assertFalse(fooTable.isBroadcast());
    Assert.assertTrue(buildTableLatch.await(1, TimeUnit.SECONDS));
    buildTableLatch = new CountDownLatch(1);
    final DataSegment someNewBrokerSegment = new DataSegment(
        "foo",
        Intervals.of("2012/2013"),
        "version1",
        null,
        ImmutableList.of("dim1", "dim2"),
        ImmutableList.of("met1", "met2"),
        new NumberedShardSpec(2, 3),
        null,
        1,
        100L,
        PruneSpecsHolder.DEFAULT
    );
    segmentDataSourceNames.add("foo");
    joinableDataSourceNames.add("foo");
    serverView.addSegment(someNewBrokerSegment, ServerType.BROKER);
    Assert.assertTrue(markDataSourceLatch.await(2, TimeUnit.SECONDS));
    // wait for build twice
    Assert.assertTrue(buildTableLatch.await(2, TimeUnit.SECONDS));
    // wait for get again, just to make sure table has been updated (latch counts down just before tables are updated)
    Assert.assertTrue(getDatasourcesLatch.await(2, TimeUnit.SECONDS));
    fooTable = (DruidTable) schema.getTableMap().get("foo");
    Assert.assertNotNull(fooTable);
    Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
    Assert.assertTrue(fooTable.getDataSource() instanceof GlobalTableDataSource);
    Assert.assertTrue(fooTable.isJoinable());
    Assert.assertTrue(fooTable.isBroadcast());
    // now remove it
    markDataSourceLatch = new CountDownLatch(1);
    buildTableLatch = new CountDownLatch(1);
    getDatasourcesLatch = new CountDownLatch(1);
    joinableDataSourceNames.remove("foo");
    segmentDataSourceNames.remove("foo");
    serverView.removeSegment(someNewBrokerSegment, ServerType.BROKER);
    Assert.assertTrue(markDataSourceLatch.await(2, TimeUnit.SECONDS));
    // wait for build
    Assert.assertTrue(buildTableLatch.await(2, TimeUnit.SECONDS));
    // wait for get again, just to make sure table has been updated (latch counts down just before tables are updated)
    Assert.assertTrue(getDatasourcesLatch.await(2, TimeUnit.SECONDS));
    fooTable = (DruidTable) schema.getTableMap().get("foo");
    Assert.assertNotNull(fooTable);
    Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
    Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
    Assert.assertFalse(fooTable.isJoinable());
    Assert.assertFalse(fooTable.isBroadcast());
}
Also used: GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource), TableDataSource (org.apache.druid.query.TableDataSource), DruidTable (org.apache.druid.sql.calcite.table.DruidTable), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (org.apache.druid.timeline.DataSegment), NumberedShardSpec (org.apache.druid.timeline.partition.NumberedShardSpec), Test (org.junit.Test)
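
A note on the paired assertions above: GlobalTableDataSource extends TableDataSource, which is why a broadcast-joinable table passes both instanceof checks while a plain table passes only the first. A minimal standalone sketch of that relationship (an illustration, not part of the test; it uses only the single-argument constructors seen in these examples):

import org.apache.druid.query.GlobalTableDataSource;
import org.apache.druid.query.TableDataSource;

public class DataSourceTypeSketch {
    public static void main(String[] args) {
        // Plain table: a TableDataSource but not a GlobalTableDataSource.
        TableDataSource regular = new TableDataSource("foo");
        // Broadcast table: GlobalTableDataSource extends TableDataSource,
        // so it satisfies both instanceof checks used in the test above.
        TableDataSource global = new GlobalTableDataSource("foo");
        System.out.println(regular instanceof GlobalTableDataSource); // false
        System.out.println(global instanceof TableDataSource);        // true
    }
}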

Example 97 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From the class TestClusterQuerySegmentWalker, the method getQueryRunnerForSegments:

@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs) {
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
        throw new ISE("Unknown query type[%s].", query.getClass());
    }
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    if (!analysis.isConcreteTableBased()) {
        throw new ISE("Cannot handle datasource: %s", query.getDataSource());
    }
    final String dataSourceName = ((TableDataSource) analysis.getBaseDataSource()).getName();
    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
    // Make sure this query type can handle the subquery, if present.
    if (analysis.isQuery() && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
        throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
    }
    final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
        analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null),
        analysis.getPreJoinableClauses(),
        new AtomicLong(),
        analysis.getBaseQuery().orElse(query)
    );
    final QueryRunner<T> baseRunner = new FinalizeResultsQueryRunner<>(
        toolChest.postMergeQueryDecoration(
            toolChest.mergeResults(
                toolChest.preMergeQueryDecoration(
                    makeTableRunner(toolChest, factory, getSegmentsForTable(dataSourceName, specs), segmentMapFn)
                )
            )
        ),
        toolChest
    );
    // to actually serve the queries
    return (theQuery, responseContext) -> {
        responseContext.initializeRemainingResponses();
        responseContext.addRemainingResponse(theQuery.getQuery().getMostSpecificId(), 0);
        if (scheduler != null) {
            Set<SegmentServerSelector> segments = new HashSet<>();
            specs.forEach(spec -> segments.add(new SegmentServerSelector(spec)));
            return scheduler.run(
                scheduler.prioritizeAndLaneQuery(theQuery, segments),
                new LazySequence<>(() -> baseRunner.run(
                    theQuery.withQuery(Queries.withSpecificSegments(theQuery.getQuery(), ImmutableList.copyOf(specs))),
                    responseContext
                ))
            );
        } else {
            return baseRunner.run(
                theQuery.withQuery(Queries.withSpecificSegments(theQuery.getQuery(), ImmutableList.copyOf(specs))),
                responseContext
            );
        }
    };
}
Also used: DataSourceAnalysis (org.apache.druid.query.planning.DataSourceAnalysis), JoinableFactory (org.apache.druid.segment.join.JoinableFactory), Function (java.util.function.Function), NoopQueryRunner (org.apache.druid.query.NoopQueryRunner), ArrayList (java.util.ArrayList), HashSet (java.util.HashSet), SegmentReference (org.apache.druid.segment.SegmentReference), PartitionChunk (org.apache.druid.timeline.partition.PartitionChunk), Interval (org.joda.time.Interval), Lists (com.google.common.collect.Lists), ImmutableList (com.google.common.collect.ImmutableList), Query (org.apache.druid.query.Query), SpecificSegmentQueryRunner (org.apache.druid.query.spec.SpecificSegmentQueryRunner), Map (java.util.Map), SegmentServerSelector (org.apache.druid.client.SegmentServerSelector), QueryRunner (org.apache.druid.query.QueryRunner), QuerySegmentWalker (org.apache.druid.query.QuerySegmentWalker), Nullable (javax.annotation.Nullable), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), LazySequence (org.apache.druid.java.util.common.guava.LazySequence), Execs (org.apache.druid.java.util.common.concurrent.Execs), VersionedIntervalTimeline (org.apache.druid.timeline.VersionedIntervalTimeline), QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate), QueryToolChest (org.apache.druid.query.QueryToolChest), JoinableFactoryWrapper (org.apache.druid.segment.join.JoinableFactoryWrapper), TimelineObjectHolder (org.apache.druid.timeline.TimelineObjectHolder), Set (java.util.Set), ISE (org.apache.druid.java.util.common.ISE), SpecificSegmentSpec (org.apache.druid.query.spec.SpecificSegmentSpec), ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment), TableDataSource (org.apache.druid.query.TableDataSource), Queries (org.apache.druid.query.Queries), AtomicLong (java.util.concurrent.atomic.AtomicLong), QueryDataSource (org.apache.druid.query.QueryDataSource), List (java.util.List), ReferenceCountingSegmentQueryRunner (org.apache.druid.query.ReferenceCountingSegmentQueryRunner), QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory), Preconditions (com.google.common.base.Preconditions), FunctionalIterable (org.apache.druid.java.util.common.guava.FunctionalIterable), SegmentDescriptor (org.apache.druid.query.SegmentDescriptor), Filters (org.apache.druid.segment.filter.Filters), Collections (java.util.Collections)
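
The heart of the walker above is the DataSourceAnalysis step: it unwraps a possibly nested datasource to a concrete base table, from which the walker takes the table name. A minimal sketch of that step in isolation, using only the calls that appear in the method (the datasource name "foo" is a placeholder):

import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.planning.DataSourceAnalysis;

public class AnalysisSketch {
    public static void main(String[] args) {
        // A bare table analyzes to a concrete, table-based datasource
        // whose base datasource is the table itself.
        DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(new TableDataSource("foo"));
        if (analysis.isConcreteTableBased()) {
            String dataSourceName = ((TableDataSource) analysis.getBaseDataSource()).getName();
            System.out.println(dataSourceName); // foo
        }
    }
}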

Example 98 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From the class CalciteJoinQueryTest, the method testFilterAndGroupByLookupUsingJoinOperator:

@Test
@Parameters(source = QueryContextForJoinProvider.class)
public void testFilterAndGroupByLookupUsingJoinOperator(Map<String, Object> queryContext) throws Exception {
    // Cannot vectorize JOIN operator.
    cannotVectorize();
    testQuery("SELECT lookyloo.k, COUNT(*)\n" + "FROM foo LEFT JOIN lookup.lookyloo ON foo.dim2 = lookyloo.k\n" + "WHERE lookyloo.v = 'xa'\n" + "GROUP BY lookyloo.k", queryContext, ImmutableList.of(GroupByQuery.builder().setDataSource(join(new TableDataSource(CalciteTests.DATASOURCE1), new LookupDataSource("lookyloo"), "j0.", equalsCondition(makeColumnExpression("dim2"), makeColumnExpression("j0.k")), JoinType.LEFT)).setInterval(querySegmentSpec(Filtration.eternity())).setDimFilter(selector("j0.v", "xa", null)).setGranularity(Granularities.ALL).setDimensions(dimensions(new DefaultDimensionSpec("j0.k", "d0"))).setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0"))).setContext(queryContext).build()), ImmutableList.of(new Object[] { "a", 2L }));
}
Also used: GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource), TableDataSource (org.apache.druid.query.TableDataSource), CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory), LookupDataSource (org.apache.druid.query.LookupDataSource), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), Parameters (junitparams.Parameters), Test (org.junit.Test)
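
The selector(...), dimensions(...), and aggregators(...) calls in this test are convenience helpers from the Calcite test base class; the objects behind them are ordinary native query types. A hedged sketch of the direct equivalents, using only public constructors that appear elsewhere in these examples:

import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.filter.SelectorDimFilter;

public class FilterDimAggSketch {
    public static void main(String[] args) {
        // Filter j0.v = 'xa' (no extraction function), group on j0.k
        // output as d0, and count rows into a0.
        SelectorDimFilter filter = new SelectorDimFilter("j0.v", "xa", null);
        DefaultDimensionSpec dim = new DefaultDimensionSpec("j0.k", "d0");
        CountAggregatorFactory count = new CountAggregatorFactory("a0");
        System.out.println(filter + " " + dim + " " + count);
    }
}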

Example 99 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From the class CalciteJoinQueryTest, the method testExactTopNOnInnerJoinWithLimit:

@Test
public void testExactTopNOnInnerJoinWithLimit() throws Exception {
    // Adjust topN threshold, so that the topN engine keeps only 1 slot for aggregates, which should be enough
    // to compute the query with limit 1.
    minTopNThreshold = 1;
    Map<String, Object> context = new HashMap<>(QUERY_CONTEXT_DEFAULT);
    context.put(PlannerConfig.CTX_KEY_USE_APPROXIMATE_TOPN, false);
    testQuery("select f1.\"dim4\", sum(\"m1\") from numfoo f1 inner join (\n" + "  select \"dim4\" from numfoo where dim4 <> 'a' group by 1\n" + ") f2 on f1.\"dim4\" = f2.\"dim4\" group by 1 limit 1", // turn on exact topN
    context, ImmutableList.of(new TopNQueryBuilder().intervals(querySegmentSpec(Filtration.eternity())).granularity(Granularities.ALL).dimension(new DefaultDimensionSpec("dim4", "_d0")).aggregators(new DoubleSumAggregatorFactory("a0", "m1")).metric(new DimensionTopNMetricSpec(null, StringComparators.LEXICOGRAPHIC)).threshold(1).dataSource(JoinDataSource.create(new TableDataSource("numfoo"), new QueryDataSource(GroupByQuery.builder().setInterval(querySegmentSpec(Filtration.eternity())).setGranularity(Granularities.ALL).setDimFilter(new NotDimFilter(new SelectorDimFilter("dim4", "a", null))).setDataSource(new TableDataSource("numfoo")).setDimensions(new DefaultDimensionSpec("dim4", "_d0")).setContext(context).build()), "j0.", "(\"dim4\" == \"j0._d0\")", JoinType.INNER, null, ExprMacroTable.nil())).context(context).build()), ImmutableList.of(new Object[] { "b", 15.0 }));
}
Also used: TopNQueryBuilder (org.apache.druid.query.topn.TopNQueryBuilder), DimensionTopNMetricSpec (org.apache.druid.query.topn.DimensionTopNMetricSpec), NotDimFilter (org.apache.druid.query.filter.NotDimFilter), DoubleSumAggregatorFactory (org.apache.druid.query.aggregation.DoubleSumAggregatorFactory), GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource), TableDataSource (org.apache.druid.query.TableDataSource), QueryDataSource (org.apache.druid.query.QueryDataSource), HashMap (java.util.HashMap), SelectorDimFilter (org.apache.druid.query.filter.SelectorDimFilter), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), Test (org.junit.Test)
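
JoinDataSource.create does the structural work in this plan: it pairs a left datasource with a right one under a column prefix and an equality condition. A stripped-down sketch of just that call, reusing the argument order from the test (left, right, right-column prefix, join condition, join type, base-table filter, expression macro table); the lookup name and condition here are illustrative only:

import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.JoinDataSource;
import org.apache.druid.query.LookupDataSource;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.segment.join.JoinType;

public class JoinSketch {
    public static void main(String[] args) {
        // INNER join of the numfoo table against a lookup; right-hand
        // columns are exposed under the "j0." prefix, with no base-table
        // filter and the default (empty) expression macro table.
        JoinDataSource joined = JoinDataSource.create(
            new TableDataSource("numfoo"),
            new LookupDataSource("lookyloo"),
            "j0.",
            "(\"dim4\" == \"j0.k\")",
            JoinType.INNER,
            null,
            ExprMacroTable.nil()
        );
        System.out.println(joined);
    }
}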

Example 100 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From the class CalciteJoinQueryTest, the method testGroupByInnerJoinOnLookupUsingJoinOperator:

@Test
@Parameters(source = QueryContextForJoinProvider.class)
public void testGroupByInnerJoinOnLookupUsingJoinOperator(Map<String, Object> queryContext) throws Exception {
    // Cannot vectorize JOIN operator.
    cannotVectorize();
    testQuery("SELECT lookyloo.v, COUNT(*)\n" + "FROM foo INNER JOIN lookup.lookyloo ON foo.dim1 = lookyloo.k\n" + "GROUP BY lookyloo.v", queryContext, ImmutableList.of(GroupByQuery.builder().setDataSource(join(new TableDataSource(CalciteTests.DATASOURCE1), new LookupDataSource("lookyloo"), "j0.", equalsCondition(makeColumnExpression("dim1"), makeColumnExpression("j0.k")), JoinType.INNER)).setInterval(querySegmentSpec(Filtration.eternity())).setGranularity(Granularities.ALL).setDimensions(dimensions(new DefaultDimensionSpec("j0.v", "d0"))).setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0"))).setContext(queryContext).build()), ImmutableList.of(new Object[] { "xabc", 1L }));
}
Also used: GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource), TableDataSource (org.apache.druid.query.TableDataSource), CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory), LookupDataSource (org.apache.druid.query.LookupDataSource), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), Parameters (junitparams.Parameters), Test (org.junit.Test)

Aggregations

Usage counts across the indexed examples:

TableDataSource (org.apache.druid.query.TableDataSource): 118
Test (org.junit.Test): 94
GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource): 46
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 43
QueryDataSource (org.apache.druid.query.QueryDataSource): 41
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 40
Parameters (junitparams.Parameters): 30
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 19
LookupDataSource (org.apache.druid.query.LookupDataSource): 18
DataSegment (org.apache.druid.timeline.DataSegment): 15
Result (org.apache.druid.query.Result): 14
CountDownLatch (java.util.concurrent.CountDownLatch): 11
Query (org.apache.druid.query.Query): 11
TimelineObjectHolder (org.apache.druid.timeline.TimelineObjectHolder): 11
Interval (org.joda.time.Interval): 11
SelectorDimFilter (org.apache.druid.query.filter.SelectorDimFilter): 10
ArrayList (java.util.ArrayList): 9
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery): 9
ISE (org.apache.druid.java.util.common.ISE): 8
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 8