
Example 6 with ServerSelector

Use of org.apache.druid.client.selector.ServerSelector in project druid by druid-io.

Source: class CachingClusteredClientTest, method testIfNoneMatch.

@Test
public void testIfNoneMatch() {
    Interval interval = Intervals.of("2016/2017");
    final DataSegment dataSegment = new DataSegment(
        "dataSource",
        interval,
        "ver",
        ImmutableMap.of("type", "hdfs", "path", "/tmp"),
        ImmutableList.of("product"),
        ImmutableList.of("visited_sum"),
        NoneShardSpec.instance(),
        9,
        12334
    );
    final ServerSelector selector = new ServerSelector(dataSegment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
    selector.addServerAndUpdateSegment(new QueryableDruidServer(servers[0], null), dataSegment);
    timeline.add(interval, "ver", new SingleElementPartitionChunk<>(selector));
    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder()
        .dataSource(DATA_SOURCE)
        .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
        .context(ImmutableMap.of("If-None-Match", "aVJV29CJY93rszVW/QBy0arWZo0="))
        .randomQueryId()
        .build();
    final ResponseContext responseContext = initializeResponseContext();
    getDefaultQueryRunner().run(QueryPlus.wrap(query), responseContext);
    Assert.assertEquals("MDs2yIUvYLVzaG6zmwTH1plqaYE=", responseContext.getEntityTag());
}
Also used: ServerSelector (org.apache.druid.client.selector.ServerSelector), HighestPriorityTierSelectorStrategy (org.apache.druid.client.selector.HighestPriorityTierSelectorStrategy), ResponseContext (org.apache.druid.query.context.ResponseContext), MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec), TimeBoundaryQuery (org.apache.druid.query.timeboundary.TimeBoundaryQuery), DataSegment (org.apache.druid.timeline.DataSegment), RandomServerSelectorStrategy (org.apache.druid.client.selector.RandomServerSelectorStrategy), Interval (org.joda.time.Interval), QueryableDruidServer (org.apache.druid.client.selector.QueryableDruidServer), Test (org.junit.Test)
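
The test above exercises the core ServerSelector lifecycle: construct the selector with a segment and a tier strategy, register each server that holds the segment, then let pick() route to one of the holders. A condensed sketch of that flow; dataSegment stands in for the segment built in the test, and queryableServer is a hypothetical QueryableDruidServer fixture:

// Condensed selector lifecycle; reuses only classes shown in the example above.
// The tier strategy groups servers by priority, then picks randomly within a tier.
ServerSelector selector = new ServerSelector(
    dataSegment,
    new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
);
// Register a server that holds the segment; pick() then routes to a holder.
selector.addServerAndUpdateSegment(queryableServer, dataSegment);
QueryableDruidServer chosen = selector.pick(null); // null query: the strategy alone decides
DataSegment served = selector.getSegment();        // the segment this selector routes for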

Example 7 with ServerSelector

Use of org.apache.druid.client.selector.ServerSelector in project druid by druid-io.

Source: class CachingClusteredClientTest, method populateTimeline.

private List<Map<DruidServer, ServerExpectations>> populateTimeline(List<Interval> queryIntervals, List<List<Iterable<Result<Object>>>> expectedResults, int numQueryIntervals, List<Object> mocks) {
    timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    final List<Map<DruidServer, ServerExpectations>> serverExpectationList = new ArrayList<>();
    for (int k = 0; k < numQueryIntervals + 1; ++k) {
        final int numChunks = expectedResults.get(k).size();
        final TreeMap<DruidServer, ServerExpectations> serverExpectations = new TreeMap<>();
        serverExpectationList.add(serverExpectations);
        for (int j = 0; j < numChunks; ++j) {
            DruidServer lastServer = servers[random.nextInt(servers.length)];
            serverExpectations.computeIfAbsent(lastServer, server -> new ServerExpectations(server, makeMock(mocks, QueryRunner.class)));
            final ShardSpec shardSpec;
            if (numChunks == 1) {
                shardSpec = new SingleDimensionShardSpec("dimAll", null, null, 0, 1);
            } else {
                String start = null;
                String end = null;
                if (j > 0) {
                    start = String.valueOf(j);
                }
                if (j + 1 < numChunks) {
                    end = String.valueOf(j + 1);
                }
                shardSpec = new SingleDimensionShardSpec("dim" + k, start, end, j, numChunks);
            }
            DataSegment mockSegment = makeMock(mocks, DataSegment.class);
            ServerExpectation<Object> expectation = new ServerExpectation<>(
                // interval/chunk
                SegmentId.dummy(StringUtils.format("%s_%s", k, j)),
                queryIntervals.get(k),
                mockSegment,
                shardSpec,
                expectedResults.get(k).get(j)
            );
            serverExpectations.get(lastServer).addExpectation(expectation);
            EasyMock.expect(mockSegment.getSize()).andReturn(0L).anyTimes();
            EasyMock.replay(mockSegment);
            ServerSelector selector = new ServerSelector(expectation.getSegment(), new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
            selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), selector.getSegment());
            EasyMock.reset(mockSegment);
            EasyMock.expect(mockSegment.getShardSpec()).andReturn(shardSpec).anyTimes();
            timeline.add(queryIntervals.get(k), String.valueOf(k), shardSpec.createChunk(selector));
        }
    }
    return serverExpectationList;
}
Also used: ArrayList (java.util.ArrayList), QueryableDruidServer (org.apache.druid.client.selector.QueryableDruidServer), TreeMap (java.util.TreeMap), DataSegment (org.apache.druid.timeline.DataSegment), HashBasedNumberedShardSpec (org.apache.druid.timeline.partition.HashBasedNumberedShardSpec), NoneShardSpec (org.apache.druid.timeline.partition.NoneShardSpec), ShardSpec (org.apache.druid.timeline.partition.ShardSpec), SingleDimensionShardSpec (org.apache.druid.timeline.partition.SingleDimensionShardSpec), ServerSelector (org.apache.druid.client.selector.ServerSelector), HighestPriorityTierSelectorStrategy (org.apache.druid.client.selector.HighestPriorityTierSelectorStrategy), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), HashMap (java.util.HashMap), RandomServerSelectorStrategy (org.apache.druid.client.selector.RandomServerSelectorStrategy)
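
The start/end bookkeeping in the else-branch above is the part worth isolating: chunk j of numChunks covers a half-open range on the partition dimension, where a null bound means the chunk is unbounded on that side, so the chunks tile the whole dimension. A standalone sketch of that rule, extracted from the loop for illustration only:

// Sketch of the boundary rule used in populateTimeline above.
static SingleDimensionShardSpec chunkSpec(String dimension, int j, int numChunks) {
    String start = j > 0 ? String.valueOf(j) : null;               // first chunk: open lower bound
    String end = j + 1 < numChunks ? String.valueOf(j + 1) : null; // last chunk: open upper bound
    return new SingleDimensionShardSpec(dimension, start, end, j, numChunks);
}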

Example 8 with ServerSelector

Use of org.apache.druid.client.selector.ServerSelector in project druid by druid-io.

Source: class CachingClusteredClientTest, method testNoSegmentPruningForHashPartitionedSegments.

private void testNoSegmentPruningForHashPartitionedSegments(boolean enableSegmentPruning, @Nullable HashPartitionFunction partitionFunction, boolean useEmptyPartitionDimensions) {
    // The equivalent filter of the dim3 conjunction below is InDimFilter("dim3", Arrays.asList("c"), null)
    DimFilter filter = new AndDimFilter(
        new SelectorDimFilter("dim1", "a", null),
        new BoundDimFilter("dim2", "e", "zzz", true, true, false, null, StringComparators.LEXICOGRAPHIC),
        new AndDimFilter(
            new InDimFilter("dim3", Arrays.asList("a", "c", "e", "g"), null),
            new BoundDimFilter("dim3", "aaa", "ddd", false, false, false, null, StringComparators.LEXICOGRAPHIC)
        )
    );
    final Map<String, Object> context = new HashMap<>(CONTEXT);
    context.put(QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, enableSegmentPruning);
    final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
        .dataSource(DATA_SOURCE)
        .filters(filter)
        .granularity(GRANULARITY)
        .intervals(SEG_SPEC)
        .intervals("2011-01-05/2011-01-10")
        .aggregators(RENAMED_AGGS)
        .postAggregators(RENAMED_POST_AGGS)
        .context(context)
        .randomQueryId();
    TimeseriesQuery query = builder.build();
    QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());
    final Interval interval1 = Intervals.of("2011-01-06/2011-01-07");
    final Interval interval2 = Intervals.of("2011-01-07/2011-01-08");
    final Interval interval3 = Intervals.of("2011-01-08/2011-01-09");
    final DruidServer lastServer = servers[random.nextInt(servers.length)];
    List<String> partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1");
    final int numPartitions1 = 6;
    for (int i = 0; i < numPartitions1; i++) {
        ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions1);
        timeline.add(interval1, "v", new NumberedPartitionChunk<>(i, numPartitions1, selector));
    }
    partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim2");
    final int numPartitions2 = 3;
    for (int i = 0; i < numPartitions2; i++) {
        ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions2);
        timeline.add(interval2, "v", new NumberedPartitionChunk<>(i, numPartitions2, selector));
    }
    partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1", "dim3");
    final int numPartitions3 = 4;
    for (int i = 0; i < numPartitions3; i++) {
        ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions3);
        timeline.add(interval3, "v", new NumberedPartitionChunk<>(i, numPartitions3, selector));
    }
    final Capture<QueryPlus> capture = Capture.newInstance();
    final Capture<ResponseContext> contextCap = Capture.newInstance();
    QueryRunner mockRunner = EasyMock.createNiceMock(QueryRunner.class);
    EasyMock.expect(mockRunner.run(EasyMock.capture(capture), EasyMock.capture(contextCap))).andReturn(Sequences.empty()).anyTimes();
    EasyMock.expect(serverView.getQueryRunner(lastServer)).andReturn(mockRunner).anyTimes();
    EasyMock.replay(serverView);
    EasyMock.replay(mockRunner);
    // Expected to read all segments
    Set<SegmentDescriptor> expectedDescriptors = new HashSet<>();
    IntStream.range(0, numPartitions1).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval1, "v", i)));
    IntStream.range(0, numPartitions2).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval2, "v", i)));
    IntStream.range(0, numPartitions3).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval3, "v", i)));
    runner.run(QueryPlus.wrap(query)).toList();
    QuerySegmentSpec querySegmentSpec = ((TimeseriesQuery) capture.getValue().getQuery()).getQuerySegmentSpec();
    Assert.assertSame(MultipleSpecificSegmentSpec.class, querySegmentSpec.getClass());
    final Set<SegmentDescriptor> actualDescriptors = new HashSet<>(((MultipleSpecificSegmentSpec) querySegmentSpec).getDescriptors());
    Assert.assertEquals(expectedDescriptors, actualDescriptors);
}
Also used: BoundDimFilter (org.apache.druid.query.filter.BoundDimFilter), HashMap (java.util.HashMap), TimeseriesQueryQueryToolChest (org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest), ServerSelector (org.apache.druid.client.selector.ServerSelector), SelectorDimFilter (org.apache.druid.query.filter.SelectorDimFilter), SegmentDescriptor (org.apache.druid.query.SegmentDescriptor), Druids (org.apache.druid.query.Druids), ResponseContext (org.apache.druid.query.context.ResponseContext), InDimFilter (org.apache.druid.query.filter.InDimFilter), QuerySegmentSpec (org.apache.druid.query.spec.QuerySegmentSpec), QueryPlus (org.apache.druid.query.QueryPlus), HashSet (java.util.HashSet), AndDimFilter (org.apache.druid.query.filter.AndDimFilter), TimeseriesQuery (org.apache.druid.query.timeseries.TimeseriesQuery), QueryableDruidServer (org.apache.druid.client.selector.QueryableDruidServer), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), QueryRunner (org.apache.druid.query.QueryRunner), DimFilter (org.apache.druid.query.filter.DimFilter), OrDimFilter (org.apache.druid.query.filter.OrDimFilter), Interval (org.joda.time.Interval)
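
The context flag driving the test is QueryContexts.SECONDARY_PARTITION_PRUNING_KEY. A hedged sketch of setting it on a standalone query; the datasource name is illustrative, Granularities.ALL and CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory) stand in for the test's GRANULARITY and RENAMED_AGGS fixtures:

// Sketch: explicitly disabling secondary partition pruning on a query.
final Map<String, Object> context = new HashMap<>();
context.put(QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, false); // broker must then query every chunk
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource("dataSource")
    .intervals("2011-01-05/2011-01-10")
    .granularity(Granularities.ALL)
    .aggregators(ImmutableList.of(new CountAggregatorFactory("rows")))
    .context(context)
    .build();

The test's point is the converse case: even with the flag enabled, hash-partitioned segments are only pruned when the broker can reproduce the partition function, so every descriptor in expectedDescriptors ends up queried.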

Example 9 with ServerSelector

Use of org.apache.druid.client.selector.ServerSelector in project druid by druid-io.

Source: class BrokerServerViewTest, method testRealtimeTasksNotWatched.

@Test
public void testRealtimeTasksNotWatched() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(4);
    segmentRemovedLatch = new CountDownLatch(0);
    // Setup a Broker that watches only Historicals
    setupViews(null, null, false);
    // Historical has segments 2 and 3, Realtime has segments 1 and 2
    final DruidServer realtimeServer = setupDruidServer(ServerType.INDEXER_EXECUTOR, null, "realtime:1", 1);
    final DruidServer historicalServer = setupHistoricalServer("tier1", "historical:2", 1);
    final DataSegment segment1 = dataSegmentWithIntervalAndVersion("2020-01-01/P1D", "v1");
    announceSegmentForServer(realtimeServer, segment1, zkPathsConfig, jsonMapper);
    final DataSegment segment2 = dataSegmentWithIntervalAndVersion("2020-01-02/P1D", "v1");
    announceSegmentForServer(realtimeServer, segment2, zkPathsConfig, jsonMapper);
    announceSegmentForServer(historicalServer, segment2, zkPathsConfig, jsonMapper);
    final DataSegment segment3 = dataSegmentWithIntervalAndVersion("2020-01-03/P1D", "v1");
    announceSegmentForServer(historicalServer, segment3, zkPathsConfig, jsonMapper);
    // Wait for the segments to be added
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // Get the timeline for the datasource
    TimelineLookup<String, ServerSelector> timeline = brokerServerView
        .getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(segment1.getDataSource())))
        .get();
    // Verify that the timeline has no entry for the interval of segment 1
    Assert.assertTrue(timeline.lookup(segment1.getInterval()).isEmpty());
    // Verify that there is one entry for the interval of segment 2
    List<TimelineObjectHolder<String, ServerSelector>> timelineHolders = timeline.lookup(segment2.getInterval());
    Assert.assertEquals(1, timelineHolders.size());
    TimelineObjectHolder<String, ServerSelector> timelineHolder = timelineHolders.get(0);
    Assert.assertEquals(segment2.getInterval(), timelineHolder.getInterval());
    Assert.assertEquals(segment2.getVersion(), timelineHolder.getVersion());
    PartitionHolder<ServerSelector> partitionHolder = timelineHolder.getObject();
    Assert.assertTrue(partitionHolder.isComplete());
    Assert.assertEquals(1, Iterables.size(partitionHolder));
    ServerSelector selector = (partitionHolder.iterator().next()).getObject();
    Assert.assertFalse(selector.isEmpty());
    Assert.assertEquals(segment2, selector.getSegment());
    // Verify that the ServerSelector always picks the Historical server
    for (int i = 0; i < 5; ++i) {
        Assert.assertEquals(historicalServer, selector.pick(null).getServer());
    }
    Assert.assertEquals(Collections.singletonList(historicalServer.getMetadata()), selector.getCandidates(2));
}
Also used: ServerSelector (org.apache.druid.client.selector.ServerSelector), TimelineObjectHolder (org.apache.druid.timeline.TimelineObjectHolder), TableDataSource (org.apache.druid.query.TableDataSource), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (org.apache.druid.timeline.DataSegment), Test (org.junit.Test)
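
The lookup sequence in the middle of the test generalizes to any broker-side routing: resolve the datasource's timeline, look up an interval, and walk the partition chunks down to a selector. A condensed sketch, with an illustrative datasource name and interval rather than the test's fixtures:

// Condensed timeline-to-selector walk, mirroring the assertions above.
TimelineLookup<String, ServerSelector> timeline = brokerServerView
    .getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource("dataSource")))
    .get();
for (TimelineObjectHolder<String, ServerSelector> holder : timeline.lookup(Intervals.of("2020-01-02/P1D"))) {
    for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
        QueryableDruidServer picked = chunk.getObject().pick(null); // strategy chooses among replicas
    }
}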

Example 10 with ServerSelector

Use of org.apache.druid.client.selector.ServerSelector in project druid by druid-io.

Source: class BrokerServerView, method serverAddedSegment.

private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment) {
    SegmentId segmentId = segment.getId();
    synchronized (lock) {
        if (!server.getType().equals(ServerType.BROKER)) {
            log.debug("Adding segment[%s] for server[%s]", segment, server);
            ServerSelector selector = selectors.get(segmentId);
            if (selector == null) {
                selector = new ServerSelector(segment, tierSelectorStrategy);
                VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
                if (timeline == null) {
                    timeline = new VersionedIntervalTimeline<>(Ordering.natural());
                    timelines.put(segment.getDataSource(), timeline);
                }
                timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(selector));
                selectors.put(segmentId, selector);
            }
            QueryableDruidServer queryableDruidServer = clients.get(server.getName());
            if (queryableDruidServer == null) {
                queryableDruidServer = addServer(baseView.getInventoryValue(server.getName()));
            }
            selector.addServerAndUpdateSegment(queryableDruidServer, segment);
        }
        // run the callbacks, even if the segment came from a broker, and let downstream watchers decide what to do with it
        runTimelineCallbacks(callback -> callback.segmentAdded(server, segment));
    }
}
Also used: ServerSelector (org.apache.druid.client.selector.ServerSelector), SegmentId (org.apache.druid.timeline.SegmentId), QueryableDruidServer (org.apache.druid.client.selector.QueryableDruidServer)
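
Stripped of locking and callbacks, the method maintains two registries: segment id to selector, and datasource to timeline. A simplified sketch of the same bookkeeping using computeIfAbsent; the fields and the register method here are a condensed illustration, not the actual BrokerServerView class:

// Simplified registries mirroring the fields serverAddedSegment touches.
private final Map<SegmentId, ServerSelector> selectors = new HashMap<>();
private final Map<String, VersionedIntervalTimeline<String, ServerSelector>> timelines = new HashMap<>();

void register(DataSegment segment, QueryableDruidServer server, TierSelectorStrategy strategy) {
    ServerSelector selector = selectors.computeIfAbsent(segment.getId(), id -> {
        // First sighting of the segment: create a selector and slot it into the
        // datasource's timeline at the segment's interval, version, and chunk.
        ServerSelector created = new ServerSelector(segment, strategy);
        timelines
            .computeIfAbsent(segment.getDataSource(), ds -> new VersionedIntervalTimeline<>(Ordering.natural()))
            .add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(created));
        return created;
    });
    // For an already-known segment this just records the additional replica.
    selector.addServerAndUpdateSegment(server, segment);
}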

Aggregations

Usage counts across all examples:

ServerSelector (org.apache.druid.client.selector.ServerSelector): 26
DataSegment (org.apache.druid.timeline.DataSegment): 17
QueryableDruidServer (org.apache.druid.client.selector.QueryableDruidServer): 16
HighestPriorityTierSelectorStrategy (org.apache.druid.client.selector.HighestPriorityTierSelectorStrategy): 11
Interval (org.joda.time.Interval): 11
RandomServerSelectorStrategy (org.apache.druid.client.selector.RandomServerSelectorStrategy): 10
Test (org.junit.Test): 10
TableDataSource (org.apache.druid.query.TableDataSource): 7
ResponseContext (org.apache.druid.query.context.ResponseContext): 6
ArrayList (java.util.ArrayList): 5
QueryRunner (org.apache.druid.query.QueryRunner): 5
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 5
Map (java.util.Map): 4
QueryPlus (org.apache.druid.query.QueryPlus): 4
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 4
ImmutableMap (com.google.common.collect.ImmutableMap): 3
HashMap (java.util.HashMap): 3
CountDownLatch (java.util.concurrent.CountDownLatch): 3
Druids (org.apache.druid.query.Druids): 3
FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner): 3