
Example 81 with TableDataSource

use of org.apache.druid.query.TableDataSource in project druid by druid-io.

the class SegmentAnalyzerTest method getSegmentAnalysises.

/**
 * *Awesome* method name auto-generated by IntelliJ!  I love IntelliJ!
 *
 * @param index    the segment to analyze
 * @param analyses the analysis types to run
 * @return the segment metadata query results for the given segment
 */
private List<SegmentAnalysis> getSegmentAnalysises(Segment index, EnumSet<SegmentMetadataQuery.AnalysisType> analyses) {
    final QueryRunner runner = QueryRunnerTestHelper.makeQueryRunner(
        (QueryRunnerFactory) new SegmentMetadataQueryRunnerFactory(
            new SegmentMetadataQueryQueryToolChest(new SegmentMetadataQueryConfig()),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER
        ),
        index,
        null
    );
    final SegmentMetadataQuery query = new SegmentMetadataQuery(
        new TableDataSource("test"),
        new LegacySegmentSpec("2011/2012"),
        null,
        null,
        null,
        analyses,
        false,
        false
    );
    return runner.run(QueryPlus.wrap(query)).toList();
}
Also used : TableDataSource(org.apache.druid.query.TableDataSource) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) QueryRunner(org.apache.druid.query.QueryRunner) LegacySegmentSpec(org.apache.druid.query.spec.LegacySegmentSpec)
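
A note on what Example 81 actually exercises: the TableDataSource simply names the table (datasource) that the SegmentMetadataQuery reads from. The sketch below isolates that construction from the test harness; the class name, the main method, the empty analysis set, and the printed output are illustrative additions rather than part of SegmentAnalyzerTest, and the three nulls mirror optional settings the test leaves unset.

import java.util.EnumSet;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.metadata.metadata.SegmentMetadataQuery;
import org.apache.druid.query.spec.LegacySegmentSpec;

public class TableDataSourceSketch {
    public static void main(String[] args) {
        // A TableDataSource names the Druid table a query targets.
        TableDataSource dataSource = new TableDataSource("test");
        // Same constructor call as in getSegmentAnalysises above, with an empty analysis set.
        SegmentMetadataQuery query = new SegmentMetadataQuery(
            dataSource,
            new LegacySegmentSpec("2011/2012"),
            null,
            null,
            null,
            EnumSet.noneOf(SegmentMetadataQuery.AnalysisType.class),
            false,
            false
        );
        // The query reports the table it targets through its data source.
        System.out.println(((TableDataSource) query.getDataSource()).getName());
    }
}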

Example 82 with TableDataSource

use of org.apache.druid.query.TableDataSource in project druid by druid-io.

the class BrokerServerViewTest method testMultipleTiers.

@Test
public void testMultipleTiers() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(4);
    segmentRemovedLatch = new CountDownLatch(0);
    // Setup a Broker that watches only Tier 2
    final String tier1 = "tier1";
    final String tier2 = "tier2";
    setupViews(Sets.newHashSet(tier2), null, true);
    // Historical Tier 1 has segments 1 and 2, Tier 2 has segments 2 and 3
    final DruidServer server11 = setupHistoricalServer(tier1, "localhost:1", 1);
    final DruidServer server21 = setupHistoricalServer(tier2, "localhost:2", 1);
    final DataSegment segment1 = dataSegmentWithIntervalAndVersion("2020-01-01/P1D", "v1");
    announceSegmentForServer(server11, segment1, zkPathsConfig, jsonMapper);
    final DataSegment segment2 = dataSegmentWithIntervalAndVersion("2020-01-02/P1D", "v1");
    announceSegmentForServer(server11, segment2, zkPathsConfig, jsonMapper);
    announceSegmentForServer(server21, segment2, zkPathsConfig, jsonMapper);
    final DataSegment segment3 = dataSegmentWithIntervalAndVersion("2020-01-03/P1D", "v1");
    announceSegmentForServer(server21, segment3, zkPathsConfig, jsonMapper);
    // Wait for the segments to be added
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // Get the timeline for the datasource
    TimelineLookup<String, ServerSelector> timeline = brokerServerView
        .getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(segment1.getDataSource())))
        .get();
    // Verify that the timeline has no entry for the interval of segment 1
    Assert.assertTrue(timeline.lookup(segment1.getInterval()).isEmpty());
    // Verify that there is one entry for the interval of segment 2
    List<TimelineObjectHolder<String, ServerSelector>> timelineHolders = timeline.lookup(segment2.getInterval());
    Assert.assertEquals(1, timelineHolders.size());
    TimelineObjectHolder<String, ServerSelector> timelineHolder = timelineHolders.get(0);
    Assert.assertEquals(segment2.getInterval(), timelineHolder.getInterval());
    Assert.assertEquals(segment2.getVersion(), timelineHolder.getVersion());
    PartitionHolder<ServerSelector> partitionHolder = timelineHolder.getObject();
    Assert.assertTrue(partitionHolder.isComplete());
    Assert.assertEquals(1, Iterables.size(partitionHolder));
    ServerSelector selector = (partitionHolder.iterator().next()).getObject();
    Assert.assertFalse(selector.isEmpty());
    Assert.assertEquals(segment2, selector.getSegment());
    // Verify that the ServerSelector always picks the server in Tier 2, the only watched tier
    for (int i = 0; i < 5; ++i) {
        Assert.assertEquals(server21, selector.pick(null).getServer());
    }
    Assert.assertEquals(Collections.singletonList(server21.getMetadata()), selector.getCandidates(2));
}
Also used : ServerSelector(org.apache.druid.client.selector.ServerSelector) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) TableDataSource(org.apache.druid.query.TableDataSource) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
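
The timeline retrieval shown here is repeated verbatim in the two BrokerServerViewTest examples that follow. A minimal sketch of that shared idiom is given below, assuming an already wired-up TimelineServerView (which BrokerServerView implements); the helper class and method names are illustrative, not part of the tests.

import java.util.List;
import org.apache.druid.client.TimelineServerView;
import org.apache.druid.client.selector.ServerSelector;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.planning.DataSourceAnalysis;
import org.apache.druid.timeline.TimelineLookup;
import org.apache.druid.timeline.TimelineObjectHolder;
import org.joda.time.Interval;

public class TimelineLookupSketch {
    /** Returns the served-segment holders for one datasource and interval. */
    static List<TimelineObjectHolder<String, ServerSelector>> lookupServedSegments(
            TimelineServerView serverView,
            String dataSource,
            Interval interval
    ) {
        // The broker timeline is keyed by a DataSourceAnalysis built from the TableDataSource.
        TimelineLookup<String, ServerSelector> timeline = serverView
            .getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(dataSource)))
            .orElseThrow(() -> new IllegalStateException("no timeline for " + dataSource));
        return timeline.lookup(interval);
    }
}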

Example 83 with TableDataSource

use of org.apache.druid.query.TableDataSource in project druid by druid-io.

the class BrokerServerViewTest method testIgnoredTiers.

@Test
public void testIgnoredTiers() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(4);
    segmentRemovedLatch = new CountDownLatch(0);
    // Setup a Broker that does not watch Tier 1
    final String tier1 = "tier1";
    final String tier2 = "tier2";
    setupViews(null, Sets.newHashSet(tier1), false);
    // Historical Tier 1 has segments 1 and 2, Tier 2 has segments 2 and 3
    final DruidServer server11 = setupHistoricalServer(tier1, "localhost:1", 1);
    final DruidServer server21 = setupHistoricalServer(tier2, "localhost:2", 1);
    final DataSegment segment1 = dataSegmentWithIntervalAndVersion("2020-01-01/P1D", "v1");
    announceSegmentForServer(server11, segment1, zkPathsConfig, jsonMapper);
    final DataSegment segment2 = dataSegmentWithIntervalAndVersion("2020-01-02/P1D", "v1");
    announceSegmentForServer(server11, segment2, zkPathsConfig, jsonMapper);
    announceSegmentForServer(server21, segment2, zkPathsConfig, jsonMapper);
    final DataSegment segment3 = dataSegmentWithIntervalAndVersion("2020-01-03/P1D", "v1");
    announceSegmentForServer(server21, segment3, zkPathsConfig, jsonMapper);
    // Wait for the segments to be added
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // Get the timeline for the datasource
    TimelineLookup<String, ServerSelector> timeline = brokerServerView
        .getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(segment1.getDataSource())))
        .get();
    // Verify that the timeline has no entry for the interval of segment 1
    Assert.assertTrue(timeline.lookup(segment1.getInterval()).isEmpty());
    // Verify that there is one entry for the interval of segment 2
    List<TimelineObjectHolder<String, ServerSelector>> timelineHolders = timeline.lookup(segment2.getInterval());
    Assert.assertEquals(1, timelineHolders.size());
    TimelineObjectHolder<String, ServerSelector> timelineHolder = timelineHolders.get(0);
    Assert.assertEquals(segment2.getInterval(), timelineHolder.getInterval());
    Assert.assertEquals(segment2.getVersion(), timelineHolder.getVersion());
    PartitionHolder<ServerSelector> partitionHolder = timelineHolder.getObject();
    Assert.assertTrue(partitionHolder.isComplete());
    Assert.assertEquals(1, Iterables.size(partitionHolder));
    ServerSelector selector = (partitionHolder.iterator().next()).getObject();
    Assert.assertFalse(selector.isEmpty());
    Assert.assertEquals(segment2, selector.getSegment());
    // Verify that the ServerSelector always picks the server in Tier 2, since Tier 1 is ignored
    for (int i = 0; i < 5; ++i) {
        Assert.assertEquals(server21, selector.pick(null).getServer());
    }
    Assert.assertEquals(Collections.singletonList(server21.getMetadata()), selector.getCandidates(2));
}
Also used : ServerSelector(org.apache.druid.client.selector.ServerSelector) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) TableDataSource(org.apache.druid.query.TableDataSource) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)

Example 84 with TableDataSource

use of org.apache.druid.query.TableDataSource in project druid by druid-io.

the class BrokerServerViewTest method testSingleServerAddedRemovedSegment.

@Test
public void testSingleServerAddedRemovedSegment() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(1);
    segmentRemovedLatch = new CountDownLatch(1);
    setupViews();
    final DruidServer druidServer = setupHistoricalServer("default_tier", "localhost:1234", 0);
    final DataSegment segment = dataSegmentWithIntervalAndVersion("2014-10-20T00:00:00Z/P1D", "v1");
    final int partition = segment.getShardSpec().getPartitionNum();
    final Interval intervals = Intervals.of("2014-10-20T00:00:00Z/P1D");
    announceSegmentForServer(druidServer, segment, zkPathsConfig, jsonMapper);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    TimelineLookup<String, ServerSelector> timeline = brokerServerView
        .getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource("test_broker_server_view")))
        .get();
    List<TimelineObjectHolder<String, ServerSelector>> serverLookupRes = timeline.lookup(intervals);
    Assert.assertEquals(1, serverLookupRes.size());
    TimelineObjectHolder<String, ServerSelector> actualTimelineObjectHolder = serverLookupRes.get(0);
    Assert.assertEquals(intervals, actualTimelineObjectHolder.getInterval());
    Assert.assertEquals("v1", actualTimelineObjectHolder.getVersion());
    PartitionHolder<ServerSelector> actualPartitionHolder = actualTimelineObjectHolder.getObject();
    Assert.assertTrue(actualPartitionHolder.isComplete());
    Assert.assertEquals(1, Iterables.size(actualPartitionHolder));
    ServerSelector selector = (actualPartitionHolder.iterator().next()).getObject();
    Assert.assertFalse(selector.isEmpty());
    Assert.assertEquals(segment, selector.getSegment());
    Assert.assertEquals(druidServer, selector.pick(null).getServer());
    Assert.assertNotNull(timeline.findChunk(intervals, "v1", partition));
    unannounceSegmentForServer(druidServer, segment, zkPathsConfig);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    Assert.assertEquals(0, timeline.lookup(intervals).size());
    Assert.assertNull(timeline.findChunk(intervals, "v1", partition));
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) ServerSelector(org.apache.druid.client.selector.ServerSelector) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) TableDataSource(org.apache.druid.query.TableDataSource) Interval(org.joda.time.Interval) Test(org.junit.Test)

Example 85 with TableDataSource

use of org.apache.druid.query.TableDataSource in project druid by druid-io.

the class CoordinatorServerViewTest method testSingleServerAddedRemovedSegment.

@Test
public void testSingleServerAddedRemovedSegment() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(1);
    segmentRemovedLatch = new CountDownLatch(1);
    setupViews();
    final DruidServer druidServer = new DruidServer("localhost:1234", "localhost:1234", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
    setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);
    final DataSegment segment = dataSegmentWithIntervalAndVersion("2014-10-20T00:00:00Z/P1D", "v1");
    final int partition = segment.getShardSpec().getPartitionNum();
    final Interval intervals = Intervals.of("2014-10-20T00:00:00Z/P1D");
    announceSegmentForServer(druidServer, segment, zkPathsConfig, jsonMapper);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    TimelineLookup timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
    List<TimelineObjectHolder> serverLookupRes = (List<TimelineObjectHolder>) timeline.lookup(intervals);
    Assert.assertEquals(1, serverLookupRes.size());
    TimelineObjectHolder<String, SegmentLoadInfo> actualTimelineObjectHolder = serverLookupRes.get(0);
    Assert.assertEquals(intervals, actualTimelineObjectHolder.getInterval());
    Assert.assertEquals("v1", actualTimelineObjectHolder.getVersion());
    PartitionHolder<SegmentLoadInfo> actualPartitionHolder = actualTimelineObjectHolder.getObject();
    Assert.assertTrue(actualPartitionHolder.isComplete());
    Assert.assertEquals(1, Iterables.size(actualPartitionHolder));
    SegmentLoadInfo segmentLoadInfo = actualPartitionHolder.iterator().next().getObject();
    Assert.assertFalse(segmentLoadInfo.isEmpty());
    Assert.assertEquals(druidServer.getMetadata(), Iterables.getOnlyElement(segmentLoadInfo.toImmutableSegmentLoadInfo().getServers()));
    Assert.assertNotNull(timeline.findChunk(intervals, "v1", partition));
    unannounceSegmentForServer(druidServer, segment);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(Intervals.of("2014-10-20T00:00:00Z/P1D"))).size());
    Assert.assertNull(timeline.findChunk(intervals, "v1", partition));
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) TableDataSource(org.apache.druid.query.TableDataSource) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) TimelineLookup(org.apache.druid.timeline.TimelineLookup) Interval(org.joda.time.Interval) Test(org.junit.Test)
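
For contrast with the broker examples above, the coordinator-side view in Example 85 resolves a TableDataSource directly into a timeline of SegmentLoadInfo entries rather than ServerSelector entries. The sketch below captures that variant under stated assumptions: the helper class and method names are illustrative, and the null check and the static type of the returned timeline are assumptions about CoordinatorServerView rather than guarantees.

import java.util.Collections;
import java.util.List;
import org.apache.druid.client.CoordinatorServerView;
import org.apache.druid.client.SegmentLoadInfo;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.timeline.TimelineLookup;
import org.apache.druid.timeline.TimelineObjectHolder;
import org.joda.time.Interval;

public class CoordinatorTimelineSketch {
    /** Returns the loaded-segment holders for one datasource and interval, or an empty list. */
    static List<TimelineObjectHolder<String, SegmentLoadInfo>> lookupLoadedSegments(
            CoordinatorServerView serverView,
            String dataSource,
            Interval interval
    ) {
        // Unlike the broker view, this lookup is keyed by the DataSource itself.
        TimelineLookup<String, SegmentLoadInfo> timeline =
            serverView.getTimeline(new TableDataSource(dataSource));
        return timeline == null ? Collections.emptyList() : timeline.lookup(interval);
    }
}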

Aggregations

TableDataSource (org.apache.druid.query.TableDataSource) 118
Test (org.junit.Test) 94
GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource) 46
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory) 43
QueryDataSource (org.apache.druid.query.QueryDataSource) 41
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec) 40
Parameters (junitparams.Parameters) 30
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec) 19
LookupDataSource (org.apache.druid.query.LookupDataSource) 18
DataSegment (org.apache.druid.timeline.DataSegment) 15
Result (org.apache.druid.query.Result) 14
CountDownLatch (java.util.concurrent.CountDownLatch) 11
Query (org.apache.druid.query.Query) 11
TimelineObjectHolder (org.apache.druid.timeline.TimelineObjectHolder) 11
Interval (org.joda.time.Interval) 11
SelectorDimFilter (org.apache.druid.query.filter.SelectorDimFilter) 10
ArrayList (java.util.ArrayList) 9
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery) 9
ISE (org.apache.druid.java.util.common.ISE) 8
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor) 8