Use of org.apache.druid.client.selector.RandomServerSelectorStrategy in project druid by druid-io: class CachingClusteredClientTest, method testEtagforDifferentQueryInterval.
@Test
public void testEtagforDifferentQueryInterval()
{
  final Interval interval = Intervals.of("2016-01-01/2016-01-02");
  final Interval queryInterval = Intervals.of("2016-01-01T14:00:00/2016-01-02T14:00:00");
  final Interval queryInterval2 = Intervals.of("2016-01-01T18:00:00/2016-01-02T18:00:00");
  final DataSegment dataSegment = new DataSegment(
      "dataSource", interval, "ver",
      ImmutableMap.of("type", "hdfs", "path", "/tmp"),
      ImmutableList.of("product"), ImmutableList.of("visited_sum"),
      NoneShardSpec.instance(), 9, 12334
  );
  final ServerSelector selector =
      new ServerSelector(dataSegment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
  selector.addServerAndUpdateSegment(new QueryableDruidServer(servers[0], null), dataSegment);
  timeline.add(interval, "ver", new SingleElementPartitionChunk<>(selector));
  final TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(queryInterval)))
      .context(ImmutableMap.of("If-None-Match", "aVJV29CJY93rszVW/QBy0arWZo0="))
      .randomQueryId()
      .build();
  final TimeBoundaryQuery query2 = Druids.newTimeBoundaryQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(queryInterval2)))
      .context(ImmutableMap.of("If-None-Match", "aVJV29CJY93rszVW/QBy0arWZo0="))
      .randomQueryId()
      .build();
  final ResponseContext responseContext = initializeResponseContext();
  getDefaultQueryRunner().run(QueryPlus.wrap(query), responseContext);
  final String etag1 = responseContext.getEntityTag();
  getDefaultQueryRunner().run(QueryPlus.wrap(query2), responseContext);
  final String etag2 = responseContext.getEntityTag();
  Assert.assertNotEquals(etag1, etag2);
}
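The broker derives the ETag from both the participating segments and the query itself, which is why the same segment queried over two different intervals yields two different tags. The toy sketch below mirrors only the shape of that idea (a Base64-encoded SHA-1 digest, like the tags above); it is not Druid's actual ETag computation, and etagFor is a hypothetical helper.

// Illustrative only: a toy ETag derivation hashing segment identity plus the
// query interval. NOT Druid's real algorithm; just a sketch of why the two
// queries above cannot share a tag.
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;

public class EtagSketch
{
  static String etagFor(String segmentId, String queryInterval) throws Exception
  {
    MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
    sha1.update(segmentId.getBytes(StandardCharsets.UTF_8));
    sha1.update(queryInterval.getBytes(StandardCharsets.UTF_8));
    return Base64.getEncoder().encodeToString(sha1.digest());
  }

  public static void main(String[] args) throws Exception
  {
    // Same segment, different query intervals -> different tags, as the test asserts.
    System.out.println(etagFor("dataSource_2016-01-01/2016-01-02_ver", "2016-01-01T14:00:00/2016-01-02T14:00:00"));
    System.out.println(etagFor("dataSource_2016-01-01/2016-01-02_ver", "2016-01-01T18:00:00/2016-01-02T18:00:00"));
  }
}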
Use of org.apache.druid.client.selector.RandomServerSelectorStrategy in project druid by druid-io: class CachingClusteredClientTest, method testCachingOverBulkLimitEnforcesLimit.
@Test
@SuppressWarnings("unchecked")
public void testCachingOverBulkLimitEnforcesLimit()
{
  final int limit = 10;
  final Interval interval = Intervals.of("2011-01-01/2011-01-02");
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
      .filters(DIM_FILTER)
      .granularity(GRANULARITY)
      .aggregators(AGGS)
      .postAggregators(POST_AGGS)
      .context(CONTEXT)
      .randomQueryId()
      .build();
  final ResponseContext context = initializeResponseContext();

  // First run: bulk cache lookups are allowed, but at most `limit` keys may be requested.
  final Cache cache = EasyMock.createStrictMock(Cache.class);
  final Capture<Iterable<Cache.NamedKey>> cacheKeyCapture = EasyMock.newCapture();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture))).andReturn(ImmutableMap.of()).once();
  EasyMock.replay(cache);
  client = makeClient(new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), -1), cache, limit);
  final DruidServer lastServer = servers[random.nextInt(servers.length)];
  final DataSegment dataSegment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.expect(dataSegment.getId()).andReturn(SegmentId.dummy(DATA_SOURCE)).anyTimes();
  EasyMock.replay(dataSegment);
  final ServerSelector selector =
      new ServerSelector(dataSegment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
  selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), dataSegment);
  timeline.add(interval, "v", new SingleElementPartitionChunk<>(selector));
  getDefaultQueryRunner().run(QueryPlus.wrap(query), context);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache key below limit", ImmutableList.copyOf(cacheKeyCapture.getValue()).size() <= limit);
  EasyMock.verify(cache);

  // Second run: a limit of 0 disables bulk cache lookups entirely.
  EasyMock.reset(cache);
  cacheKeyCapture.reset();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture))).andReturn(ImmutableMap.of()).once();
  EasyMock.replay(cache);
  client = makeClient(new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), -1), cache, 0);
  getDefaultQueryRunner().run(QueryPlus.wrap(query), context);
  EasyMock.verify(cache);
  EasyMock.verify(dataSegment);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache Keys empty", ImmutableList.copyOf(cacheKeyCapture.getValue()).isEmpty());
}
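The two runs pin down the bulk-cache contract from opposite sides: with a positive limit, no single Cache.getBulk() call may receive more than `limit` keys; with a limit of 0, the bulk lookup is effectively skipped and the captured key iterable is empty. A minimal sketch of that invariant, assuming the client simply bypasses the cache once the key count exceeds the limit (capForBulkLookup is a hypothetical helper, not Druid code):

// Sketch of the invariant the test checks: never hand the cache more than
// `bulkLimit` keys in one getBulk() call. Hypothetical helper under the
// assumption that oversized lookups are skipped rather than chunked.
import com.google.common.collect.ImmutableList;
import java.util.List;

final class BulkLimitSketch
{
  static <K> List<K> capForBulkLookup(List<K> keys, int bulkLimit)
  {
    // Beyond the limit we skip the cache for the whole batch; with a
    // limit of 0 this always yields an empty lookup, matching the test.
    return keys.size() <= bulkLimit ? keys : ImmutableList.of();
  }
}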
Use of org.apache.druid.client.selector.RandomServerSelectorStrategy in project druid by druid-io: class CachingClusteredClientPerfTest, method testGetQueryRunnerForSegments_singleIntervalLargeSegments.
@Test(timeout = 10_000)
public void testGetQueryRunnerForSegments_singleIntervalLargeSegments()
{
  final int segmentCount = 30_000;
  final Interval interval = Intervals.of("2021-02-13/2021-02-14");
  final List<SegmentDescriptor> segmentDescriptors = new ArrayList<>(segmentCount);
  final List<DataSegment> dataSegments = new ArrayList<>(segmentCount);
  final VersionedIntervalTimeline<String, ServerSelector> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
  final DruidServer server = new DruidServer(
      "server", "localhost:9000", null, Long.MAX_VALUE,
      ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_PRIORITY
  );
  for (int ii = 0; ii < segmentCount; ii++) {
    segmentDescriptors.add(new SegmentDescriptor(interval, "1", ii));
    DataSegment segment = makeDataSegment("test", interval, "1", ii);
    dataSegments.add(segment);
  }
  timeline.addAll(Iterators.transform(dataSegments.iterator(), segment -> {
    ServerSelector ss = new ServerSelector(segment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
    ss.addServerAndUpdateSegment(new QueryableDruidServer(server, new MockQueryRunner()), segment);
    return new VersionedIntervalTimeline.PartitionChunkEntry<>(
        segment.getInterval(),
        segment.getVersion(),
        segment.getShardSpec().createChunk(ss)
    );
  }));
  TimelineServerView serverView = Mockito.mock(TimelineServerView.class);
  QueryScheduler queryScheduler = Mockito.mock(QueryScheduler.class);
  // mock scheduler to return same sequence as argument
  Mockito.when(queryScheduler.run(any(), any())).thenAnswer(i -> i.getArgument(1));
  Mockito.when(queryScheduler.prioritizeAndLaneQuery(any(), any())).thenAnswer(i -> ((QueryPlus) i.getArgument(0)).getQuery());
  Mockito.doReturn(Optional.of(timeline)).when(serverView).getTimeline(any());
  Mockito.doReturn(new MockQueryRunner()).when(serverView).getQueryRunner(any());
  CachingClusteredClient cachingClusteredClient = new CachingClusteredClient(
      new MockQueryToolChestWareHouse(),
      serverView,
      MapCache.create(1024),
      TestHelper.makeJsonMapper(),
      Mockito.mock(CachePopulator.class),
      new CacheConfig(),
      Mockito.mock(DruidHttpClientConfig.class),
      Mockito.mock(DruidProcessingConfig.class),
      ForkJoinPool.commonPool(),
      queryScheduler,
      NoopJoinableFactory.INSTANCE,
      new NoopServiceEmitter()
  );
  Query<SegmentDescriptor> fakeQuery = makeFakeQuery(interval);
  QueryRunner<SegmentDescriptor> queryRunner = cachingClusteredClient.getQueryRunnerForSegments(fakeQuery, segmentDescriptors);
  Sequence<SegmentDescriptor> sequence = queryRunner.run(QueryPlus.wrap(fakeQuery));
  Assert.assertEquals(segmentDescriptors, sequence.toList());
}
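The setup above leans on a pass-through mock: QueryScheduler.run() is stubbed to return its own second argument, so scheduling adds no behavior and the timed section measures only the cost of resolving 30,000 segment descriptors against the timeline. The standalone sketch below shows just that Mockito pattern; `Scheduler` is a hypothetical stand-in interface, not the real QueryScheduler.

// Minimal sketch of the pass-through mocking pattern: the mock echoes back
// one of its own arguments, so the collaborator contributes no behavior.
import static org.mockito.ArgumentMatchers.any;
import org.mockito.Mockito;

interface Scheduler
{
  <T> T run(Object query, T sequence);
}

final class PassThroughMockSketch
{
  static Scheduler passThrough()
  {
    Scheduler scheduler = Mockito.mock(Scheduler.class);
    // Echo back argument 1, exactly like queryScheduler.run(any(), any()) above.
    Mockito.when(scheduler.run(any(), any())).thenAnswer(inv -> inv.getArgument(1));
    return scheduler;
  }
}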
Use of org.apache.druid.client.selector.RandomServerSelectorStrategy in project druid by druid-io: class CachingClusteredClientTest, method testIfNoneMatch.
@Test
public void testIfNoneMatch()
{
  Interval interval = Intervals.of("2016/2017");
  final DataSegment dataSegment = new DataSegment(
      "dataSource", interval, "ver",
      ImmutableMap.of("type", "hdfs", "path", "/tmp"),
      ImmutableList.of("product"), ImmutableList.of("visited_sum"),
      NoneShardSpec.instance(), 9, 12334
  );
  final ServerSelector selector =
      new ServerSelector(dataSegment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
  selector.addServerAndUpdateSegment(new QueryableDruidServer(servers[0], null), dataSegment);
  timeline.add(interval, "ver", new SingleElementPartitionChunk<>(selector));
  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
      .context(ImmutableMap.of("If-None-Match", "aVJV29CJY93rszVW/QBy0arWZo0="))
      .randomQueryId()
      .build();
  final ResponseContext responseContext = initializeResponseContext();
  getDefaultQueryRunner().run(QueryPlus.wrap(query), responseContext);
  Assert.assertEquals("MDs2yIUvYLVzaG6zmwTH1plqaYE=", responseContext.getEntityTag());
}
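Here the client-supplied If-None-Match value does not match the ETag the broker computes for this segment set, so the query executes and the fresh tag lands in the response context. A minimal sketch of that conditional step, assuming standard If-None-Match semantics (shouldExecute is a hypothetical helper, not the broker's actual code path):

// Sketch of the If-None-Match handshake the test exercises: run the query
// only when the client's tag differs from the freshly computed one.
import java.util.Objects;

final class IfNoneMatchSketch
{
  static boolean shouldExecute(String clientEtag, String computedEtag)
  {
    // A matching tag would short-circuit (HTTP 304 Not Modified); above,
    // "aVJV29CJY93rszVW/QBy0arWZo0=" != "MDs2yIUvYLVzaG6zmwTH1plqaYE=",
    // so the query runs and the new tag is set on the response context.
    return !Objects.equals(clientEtag, computedEtag);
  }
}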
Use of org.apache.druid.client.selector.RandomServerSelectorStrategy in project druid by druid-io: class CachingClusteredClientTest, method populateTimeline.
private List<Map<DruidServer, ServerExpectations>> populateTimeline(
    List<Interval> queryIntervals,
    List<List<Iterable<Result<Object>>>> expectedResults,
    int numQueryIntervals,
    List<Object> mocks
)
{
  timeline = new VersionedIntervalTimeline<>(Ordering.natural());
  final List<Map<DruidServer, ServerExpectations>> serverExpectationList = new ArrayList<>();
  for (int k = 0; k < numQueryIntervals + 1; ++k) {
    final int numChunks = expectedResults.get(k).size();
    final TreeMap<DruidServer, ServerExpectations> serverExpectations = new TreeMap<>();
    serverExpectationList.add(serverExpectations);
    for (int j = 0; j < numChunks; ++j) {
      DruidServer lastServer = servers[random.nextInt(servers.length)];
      serverExpectations.computeIfAbsent(lastServer, server -> new ServerExpectations(server, makeMock(mocks, QueryRunner.class)));
      final ShardSpec shardSpec;
      if (numChunks == 1) {
        shardSpec = new SingleDimensionShardSpec("dimAll", null, null, 0, 1);
      } else {
        String start = null;
        String end = null;
        if (j > 0) {
          start = String.valueOf(j);
        }
        if (j + 1 < numChunks) {
          end = String.valueOf(j + 1);
        }
        shardSpec = new SingleDimensionShardSpec("dim" + k, start, end, j, numChunks);
      }
      DataSegment mockSegment = makeMock(mocks, DataSegment.class);
      ServerExpectation<Object> expectation = new ServerExpectation<>(
          SegmentId.dummy(StringUtils.format("%s_%s", k, j)), // interval/chunk
          queryIntervals.get(k),
          mockSegment,
          shardSpec,
          expectedResults.get(k).get(j)
      );
      serverExpectations.get(lastServer).addExpectation(expectation);
      EasyMock.expect(mockSegment.getSize()).andReturn(0L).anyTimes();
      EasyMock.replay(mockSegment);
      ServerSelector selector =
          new ServerSelector(expectation.getSegment(), new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
      selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), selector.getSegment());
      EasyMock.reset(mockSegment);
      EasyMock.expect(mockSegment.getShardSpec()).andReturn(shardSpec).anyTimes();
      timeline.add(queryIntervals.get(k), String.valueOf(k), shardSpec.createChunk(selector));
    }
  }
  return serverExpectationList;
}
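For numChunks > 1, the inner loop leaves chunk 0 unbounded below and the last chunk unbounded above, so the SingleDimensionShardSpec ranges tile the dimension space with no gaps: [null, "1"), ["1", "2"), ..., ["N-1", null). The plain-Java illustration below simply replays that boundary arithmetic:

// Replays the start/end computation from the j-loop above for three chunks.
final class ShardBoundarySketch
{
  public static void main(String[] args)
  {
    int numChunks = 3;
    for (int j = 0; j < numChunks; j++) {
      String start = j > 0 ? String.valueOf(j) : null;               // null = unbounded below
      String end = j + 1 < numChunks ? String.valueOf(j + 1) : null; // null = unbounded above
      System.out.printf("chunk %d: [%s, %s)%n", j, start, end);
      // Prints: chunk 0: [null, 1), chunk 1: [1, 2), chunk 2: [2, null)
    }
  }
}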