Example 91 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From class DataSourcesResourceTest, method testIsHandOffComplete.

@Test
public void testIsHandOffComplete() {
    MetadataRuleManager databaseRuleManager = EasyMock.createMock(MetadataRuleManager.class);
    Rule loadRule = new IntervalLoadRule(Intervals.of("2013-01-02T00:00:00Z/2013-01-03T00:00:00Z"), null);
    Rule dropRule = new IntervalDropRule(Intervals.of("2013-01-01T00:00:00Z/2013-01-02T00:00:00Z"));
    DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, null, databaseRuleManager, null, null, null);
    // test dropped
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.replay(databaseRuleManager);
    String interval1 = "2013-01-01T01:00:00Z/2013-01-01T02:00:00Z";
    Response response1 = dataSourcesResource.isHandOffComplete("dataSource1", interval1, 1, "v1");
    Assert.assertTrue((boolean) response1.getEntity());
    EasyMock.verify(databaseRuleManager);
    // test isn't dropped and no timeline found
    EasyMock.reset(databaseRuleManager);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(null).once();
    EasyMock.replay(inventoryView, databaseRuleManager);
    String interval2 = "2013-01-02T01:00:00Z/2013-01-02T02:00:00Z";
    Response response2 = dataSourcesResource.isHandOffComplete("dataSource1", interval2, 1, "v1");
    Assert.assertFalse((boolean) response2.getEntity());
    EasyMock.verify(inventoryView, databaseRuleManager);
    // test isn't dropped and timeline exists
    String interval3 = "2013-01-02T02:00:00Z/2013-01-02T03:00:00Z";
    SegmentLoadInfo segmentLoadInfo = new SegmentLoadInfo(createSegment(Intervals.of(interval3), "v1", 1));
    segmentLoadInfo.addServer(createHistoricalServerMetadata("test"));
    VersionedIntervalTimeline<String, SegmentLoadInfo> timeline = new VersionedIntervalTimeline<String, SegmentLoadInfo>(null) {

        @Override
        public List<TimelineObjectHolder<String, SegmentLoadInfo>> lookupWithIncompletePartitions(Interval interval) {
            PartitionHolder<SegmentLoadInfo> partitionHolder = new PartitionHolder<>(new NumberedPartitionChunk<>(1, 1, segmentLoadInfo));
            List<TimelineObjectHolder<String, SegmentLoadInfo>> ret = new ArrayList<>();
            ret.add(new TimelineObjectHolder<>(Intervals.of(interval3), "v1", partitionHolder));
            return ret;
        }
    };
    EasyMock.reset(inventoryView, databaseRuleManager);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault("dataSource1")).andReturn(ImmutableList.of(loadRule, dropRule)).once();
    EasyMock.expect(inventoryView.getTimeline(new TableDataSource("dataSource1"))).andReturn(timeline).once();
    EasyMock.replay(inventoryView, databaseRuleManager);
    Response response3 = dataSourcesResource.isHandOffComplete("dataSource1", interval3, 1, "v1");
    Assert.assertTrue((boolean) response3.getEntity());
    EasyMock.verify(inventoryView, databaseRuleManager);
}
Also used : PartitionHolder(org.apache.druid.timeline.partition.PartitionHolder) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) SegmentLoadInfo(org.apache.druid.client.SegmentLoadInfo) ImmutableSegmentLoadInfo(org.apache.druid.client.ImmutableSegmentLoadInfo) ArrayList(java.util.ArrayList) IntervalDropRule(org.apache.druid.server.coordinator.rules.IntervalDropRule) Response(javax.ws.rs.core.Response) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) TableDataSource(org.apache.druid.query.TableDataSource) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) Rule(org.apache.druid.server.coordinator.rules.Rule) Interval(org.joda.time.Interval) Test(org.junit.Test)
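
The test above relies on two private helpers, createSegment and createHistoricalServerMetadata, which are defined elsewhere in DataSourcesResourceTest and not shown in this listing. A minimal sketch of what such helpers could look like; the field values (datasource name, port, tier, sizes) are assumptions chosen purely for illustration:

import org.apache.druid.server.coordination.DruidServerMetadata;
import org.apache.druid.server.coordination.ServerType;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.joda.time.Interval;

// Hypothetical reconstruction; the real helpers live in DataSourcesResourceTest.
private DataSegment createSegment(Interval interval, String version, int partitionNum)
{
    return DataSegment.builder()
                      .dataSource("dataSource1")
                      .interval(interval)
                      .version(version)
                      .shardSpec(new NumberedShardSpec(partitionNum, 0))
                      .size(0)
                      .build();
}

private DruidServerMetadata createHistoricalServerMetadata(String name)
{
    // args: name, hostAndPort, hostAndTlsPort, maxSize, type, tier, priority
    return new DruidServerMetadata(name, name + ":8083", null, 10_000L, ServerType.HISTORICAL, "tier", 0);
}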

Example 92 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From class DruidQueryTest, method test_filtration_intervalInQueryFilter.

@Test
public void test_filtration_intervalInQueryFilter() {
    DataSource dataSource = new TableDataSource("test");
    Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(dataSource, filterWithInterval, VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE));
    verify(pair, dataSource, selectorFilter, Intervals.utc(100, 200));
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) TableDataSource(org.apache.druid.query.TableDataSource) DataSource(org.apache.druid.query.DataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Test(org.junit.Test)

Example 93 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From class DruidQueryTest, method test_filtration_noJoinAndInterval.

@Test
public void test_filtration_noJoinAndInterval() {
    DataSource dataSource = new TableDataSource("test");
    Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(dataSource, selectorFilter, VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE));
    verify(pair, dataSource, selectorFilter, Intervals.ETERNITY);
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) TableDataSource(org.apache.druid.query.TableDataSource) DataSource(org.apache.druid.query.DataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Test(org.junit.Test)
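
Both filtration tests above delegate their assertions to a private verify helper in DruidQueryTest that this listing omits. A plausible sketch, with the signature inferred from the call sites (selectorFilter is assumed to be a DimFilter fixture in the test class):

import java.util.Collections;
import org.apache.druid.java.util.common.Pair;
import org.apache.druid.query.DataSource;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.sql.calcite.filtration.Filtration;
import org.joda.time.Interval;
import org.junit.Assert;

// Hypothetical reconstruction based on how verify(...) is called above:
// the returned pair should carry the unchanged datasource, the extracted
// filter, and exactly one query interval.
private static void verify(
    Pair<DataSource, Filtration> pair,
    DataSource expectedDataSource,
    DimFilter expectedFilter,
    Interval expectedInterval
)
{
    Assert.assertEquals(expectedDataSource, pair.lhs);
    Assert.assertEquals(expectedFilter, pair.rhs.getDimFilter());
    Assert.assertEquals(Collections.singletonList(expectedInterval), pair.rhs.getIntervals());
}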

Example 94 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline.

/**
 * This tests the contention between 3 components, DruidSchema, InventoryView, and BrokerServerView.
 * It first triggers refreshing DruidSchema. To mimic some heavy work done with {@link DruidSchema#lock},
 * {@link DruidSchema#buildDruidTable} is overridden to sleep before doing real work. While refreshing DruidSchema,
 * more new segments are added to InventoryView, which triggers updates of BrokerServerView. Finally, while
 * BrokerServerView is updated, {@link BrokerServerView#getTimeline} is continuously called to mimic user query
 * processing. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done while holding the lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable
    Future refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    // Trigger updates of BrokerServerView. This should be done asynchronously.
    // add completely new segments
    addSegmentsToCluster(numExistingSegments, numServers, 50);
    // add replicas of the first 30 segments.
    addReplicasToCluster(1, numServers, 30);
    // after the removal below, the first 30 segments will still have replicas;
    // the other 20 segments will be completely removed from the cluster.
    removeSegmentsFromCluster(numServers, 50);
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        boolean hasTimeline = exec.submit(() -> serverView.getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE))).isPresent()).get(100, TimeUnit.MILLISECONDS);
        Assert.assertTrue(hasTimeline);
        // We want to call getTimeline while BrokerServerView is being updated. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
Also used : DruidTable(org.apache.druid.sql.calcite.table.DruidTable) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) NoopEscalator(org.apache.druid.server.security.NoopEscalator) TableDataSource(org.apache.druid.query.TableDataSource) CallbackAction(org.apache.druid.client.ServerView.CallbackAction) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) TimelineCallback(org.apache.druid.client.TimelineServerView.TimelineCallback) Future(java.util.concurrent.Future) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Test(org.junit.Test)
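
The polling loop above resolves the timeline through DataSourceAnalysis rather than a raw datasource name. A minimal sketch of that resolution step in isolation; the method wrapper and datasource name are invented for illustration, while the DataSourceAnalysis calls reflect the Druid API used in the test:

import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.planning.DataSourceAnalysis;

// A plain table analyzes to a concrete, table-based datasource, which is what
// BrokerServerView.getTimeline needs in order to resolve a segment timeline.
static boolean hasConcreteTable(String name)
{
    DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(new TableDataSource(name));
    // true for a simple table; joins and inline datasources behave differently
    return analysis.isConcreteTableBased() && analysis.getBaseTableDataSource().isPresent();
}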

Example 95 with TableDataSource

Use of org.apache.druid.query.TableDataSource in project druid by druid-io.

From class DruidSchemaTest, method testLocalSegmentCacheSetsDataSourceAsBroadcastButNotJoinable.

@Test
public void testLocalSegmentCacheSetsDataSourceAsBroadcastButNotJoinable() throws InterruptedException {
    DruidTable fooTable = (DruidTable) schema.getTableMap().get("foo");
    Assert.assertNotNull(fooTable);
    Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
    Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
    Assert.assertFalse(fooTable.isJoinable());
    Assert.assertFalse(fooTable.isBroadcast());
    // wait for build twice
    Assert.assertTrue(buildTableLatch.await(1, TimeUnit.SECONDS));
    buildTableLatch = new CountDownLatch(1);
    final DataSegment someNewBrokerSegment = new DataSegment("foo", Intervals.of("2012/2013"), "version1", null, ImmutableList.of("dim1", "dim2"), ImmutableList.of("met1", "met2"), new NumberedShardSpec(2, 3), null, 1, 100L, PruneSpecsHolder.DEFAULT);
    segmentDataSourceNames.add("foo");
    serverView.addSegment(someNewBrokerSegment, ServerType.BROKER);
    Assert.assertTrue(markDataSourceLatch.await(2, TimeUnit.SECONDS));
    Assert.assertTrue(buildTableLatch.await(2, TimeUnit.SECONDS));
    // wait for get again, just to make sure table has been updated (latch counts down just before tables are updated)
    Assert.assertTrue(getDatasourcesLatch.await(2, TimeUnit.SECONDS));
    fooTable = (DruidTable) schema.getTableMap().get("foo");
    Assert.assertNotNull(fooTable);
    Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
    // should not be a GlobalTableDataSource for now, because isGlobal is coupled with joinability; ideally this will
    // change in the future, and then we should expect a GlobalTableDataSource here
    Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
    Assert.assertTrue(fooTable.isBroadcast());
    Assert.assertFalse(fooTable.isJoinable());
    // now remove it
    markDataSourceLatch = new CountDownLatch(1);
    buildTableLatch = new CountDownLatch(1);
    getDatasourcesLatch = new CountDownLatch(1);
    segmentDataSourceNames.remove("foo");
    serverView.removeSegment(someNewBrokerSegment, ServerType.BROKER);
    Assert.assertTrue(markDataSourceLatch.await(2, TimeUnit.SECONDS));
    // wait for build
    Assert.assertTrue(buildTableLatch.await(2, TimeUnit.SECONDS));
    // wait for get again, just to make sure table has been updated (latch counts down just before tables are updated)
    Assert.assertTrue(getDatasourcesLatch.await(2, TimeUnit.SECONDS));
    fooTable = (DruidTable) schema.getTableMap().get("foo");
    Assert.assertNotNull(fooTable);
    Assert.assertTrue(fooTable.getDataSource() instanceof TableDataSource);
    Assert.assertFalse(fooTable.getDataSource() instanceof GlobalTableDataSource);
    Assert.assertFalse(fooTable.isBroadcast());
    Assert.assertFalse(fooTable.isJoinable());
}
Also used : GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) TableDataSource(org.apache.druid.query.TableDataSource) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) Test(org.junit.Test)
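
The instanceof assertions above hinge on GlobalTableDataSource being a subclass of TableDataSource that flips the isGlobal flag. A minimal illustration of that relationship; the wrapper method and datasource name are invented:

import org.apache.druid.query.GlobalTableDataSource;
import org.apache.druid.query.TableDataSource;

static void demonstrateGlobalVsPlain()
{
    TableDataSource plain = new TableDataSource("foo");
    TableDataSource broadcast = new GlobalTableDataSource("foo");
    // GlobalTableDataSource is a TableDataSource whose isGlobal() flag is true,
    // so the test can assert instanceof TableDataSource in both states while
    // using instanceof GlobalTableDataSource to tell the two apart.
    System.out.println(plain.isGlobal());      // false
    System.out.println(broadcast.isGlobal());  // true
}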

Aggregations

TableDataSource (org.apache.druid.query.TableDataSource): 118
Test (org.junit.Test): 94
GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource): 46
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 43
QueryDataSource (org.apache.druid.query.QueryDataSource): 41
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 40
Parameters (junitparams.Parameters): 30
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 19
LookupDataSource (org.apache.druid.query.LookupDataSource): 18
DataSegment (org.apache.druid.timeline.DataSegment): 15
Result (org.apache.druid.query.Result): 14
CountDownLatch (java.util.concurrent.CountDownLatch): 11
Query (org.apache.druid.query.Query): 11
TimelineObjectHolder (org.apache.druid.timeline.TimelineObjectHolder): 11
Interval (org.joda.time.Interval): 11
SelectorDimFilter (org.apache.druid.query.filter.SelectorDimFilter): 10
ArrayList (java.util.ArrayList): 9
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery): 9
ISE (org.apache.druid.java.util.common.ISE): 8
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 8
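
As the tallies suggest, the most common pattern across these usages is a TableDataSource constructed inside a test query, frequently alongside CountAggregatorFactory, DefaultDimensionSpec, and MultipleIntervalSegmentSpec. A representative sketch of that pairing; the datasource name, interval, and column names are invented for illustration, while the builder calls follow the standard Druid test idiom:

import com.google.common.collect.ImmutableList;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;

// Build a group-by over a plain table datasource: one dimension, one count
// aggregator, one query interval.
static GroupByQuery exampleGroupByOnTable()
{
    return GroupByQuery.builder()
                       .setDataSource(new TableDataSource("foo"))
                       .setQuerySegmentSpec(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2013-01-01/2013-01-02"))))
                       .setGranularity(Granularities.ALL)
                       .setDimensions(new DefaultDimensionSpec("dim1", "dim1"))
                       .setAggregatorSpecs(new CountAggregatorFactory("rows"))
                       .build();
}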