Use of org.apache.druid.client.cache.ForegroundCachePopulator in project druid by druid-io.
From the class QueryRunnerBasedOnClusteredClientTestBase, method setupTestBase:
@Before
public void setupTestBase() {
  segmentGenerator = new SegmentGenerator();
  httpClient = new TestHttpClient(objectMapper);
  simpleServerView = new SimpleServerView(toolChestWarehouse, objectMapper, httpClient);
  cachingClusteredClient = new CachingClusteredClient(
      toolChestWarehouse, simpleServerView, MapCache.create(0), objectMapper,
      new ForegroundCachePopulator(objectMapper, new CachePopulatorStats(), 0),
      new CacheConfig(), new DruidHttpClientConfig(),
      QueryStackTests.getProcessingConfig(USE_PARALLEL_MERGE_POOL_CONFIGURED, DruidProcessingConfig.DEFAULT_NUM_MERGE_BUFFERS),
      ForkJoinPool.commonPool(), QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter());
  servers = new ArrayList<>();
}
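All of these examples construct ForegroundCachePopulator from the same three arguments: an ObjectMapper that serializes results for the cache, a CachePopulatorStats collector, and a maximum cache-entry size in bytes. A minimal standalone sketch (not taken from the test above; reading a non-positive maxEntrySize as "no size limit" is an assumption inferred from the 0 and -1 values these tests pass):

import org.apache.druid.client.cache.CachePopulatorStats;
import org.apache.druid.client.cache.ForegroundCachePopulator;
import org.apache.druid.jackson.DefaultObjectMapper;

public class ForegroundCachePopulatorSketch {
  public static ForegroundCachePopulator create() {
    return new ForegroundCachePopulator(
        new DefaultObjectMapper(),  // serializes each cache entry before it is stored
        new CachePopulatorStats(),  // counters for cache-population outcomes
        -1                          // maxEntrySize in bytes; assumed: non-positive disables the size cap
    );
  }
}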
Use of org.apache.druid.client.cache.ForegroundCachePopulator in project druid by druid-io.
From the class CachingClusteredClientTest, method setUp:
@Before
public void setUp() {
  timeline = new VersionedIntervalTimeline<>(Ordering.natural());
  serverView = EasyMock.createNiceMock(TimelineServerView.class);
  cache = MapCache.create(100000);
  client = makeClient(new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), -1));
  servers = new DruidServer[]{
      new DruidServer("test1", "test1", null, 10, ServerType.HISTORICAL, "bye", 0),
      new DruidServer("test2", "test2", null, 10, ServerType.HISTORICAL, "bye", 0),
      new DruidServer("test3", "test3", null, 10, ServerType.HISTORICAL, "bye", 0),
      new DruidServer("test4", "test4", null, 10, ServerType.HISTORICAL, "bye", 0),
      new DruidServer("test5", "test5", null, 10, ServerType.HISTORICAL, "bye", 0)
  };
}
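MapCache.create(100000) above provides a simple on-heap Cache capped at that many bytes. A minimal sketch of the Cache interface as these tests exercise it; the (namespace, key) semantics of NamedKey are an assumption based on the interface shape:

import org.apache.druid.client.cache.Cache;
import org.apache.druid.client.cache.MapCache;

public class MapCacheSketch {
  public static void main(String[] args) {
    Cache cache = MapCache.create(100_000);  // capacity in bytes, as in setUp()

    Cache.NamedKey key = new Cache.NamedKey("segment-namespace", new byte[]{1, 2, 3});
    cache.put(key, new byte[]{42});

    byte[] value = cache.get(key);  // returns null on a cache miss
    System.out.println(value[0]);   // 42
  }
}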
Use of org.apache.druid.client.cache.ForegroundCachePopulator in project druid by druid-io.
From the class CachingClusteredClientTest, method testCachingOverBulkLimitEnforcesLimit:
@Test
@SuppressWarnings("unchecked")
public void testCachingOverBulkLimitEnforcesLimit() {
  final int limit = 10;
  final Interval interval = Intervals.of("2011-01-01/2011-01-02");
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
      .filters(DIM_FILTER)
      .granularity(GRANULARITY)
      .aggregators(AGGS)
      .postAggregators(POST_AGGS)
      .context(CONTEXT)
      .randomQueryId()
      .build();
  final ResponseContext context = initializeResponseContext();
  final Cache cache = EasyMock.createStrictMock(Cache.class);
  final Capture<Iterable<Cache.NamedKey>> cacheKeyCapture = EasyMock.newCapture();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture))).andReturn(ImmutableMap.of()).once();
  EasyMock.replay(cache);

  // First run: with a bulk-get limit of 10, at most 10 keys may be passed to the cache.
  client = makeClient(new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), -1), cache, limit);
  final DruidServer lastServer = servers[random.nextInt(servers.length)];
  final DataSegment dataSegment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.expect(dataSegment.getId()).andReturn(SegmentId.dummy(DATA_SOURCE)).anyTimes();
  EasyMock.replay(dataSegment);
  final ServerSelector selector = new ServerSelector(
      dataSegment,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
  selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), dataSegment);
  timeline.add(interval, "v", new SingleElementPartitionChunk<>(selector));
  getDefaultQueryRunner().run(QueryPlus.wrap(query), context);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache key below limit", ImmutableList.copyOf(cacheKeyCapture.getValue()).size() <= limit);
  EasyMock.verify(cache);

  // Second run: a limit of 0 means nothing may be fetched in bulk, so getBulk receives an empty key set.
  EasyMock.reset(cache);
  cacheKeyCapture.reset();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture))).andReturn(ImmutableMap.of()).once();
  EasyMock.replay(cache);
  client = makeClient(new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), -1), cache, 0);
  getDefaultQueryRunner().run(QueryPlus.wrap(query), context);
  EasyMock.verify(cache);
  EasyMock.verify(dataSegment);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache keys empty", ImmutableList.copyOf(cacheKeyCapture.getValue()).isEmpty());
}
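The assertions above rely on EasyMock's Capture: the strict mock records the Iterable of keys passed to getBulk so the test can inspect it after the query runs. A standalone sketch of that capture-and-verify pattern, using a hypothetical MyCache interface in place of Druid's Cache:

import java.util.Collections;
import java.util.Map;
import org.easymock.Capture;
import org.easymock.EasyMock;

interface MyCache {
  Map<String, byte[]> getBulk(Iterable<String> keys);
}

public class CaptureSketch {
  public static void main(String[] args) {
    MyCache cache = EasyMock.createStrictMock(MyCache.class);
    Capture<Iterable<String>> keys = EasyMock.newCapture();
    EasyMock.expect(cache.getBulk(EasyMock.capture(keys)))
            .andReturn(Collections.emptyMap())
            .once();
    EasyMock.replay(cache);

    cache.getBulk(java.util.Arrays.asList("a", "b"));

    EasyMock.verify(cache);               // fails unless getBulk was called exactly once
    System.out.println(keys.getValue());  // the captured argument, inspectable after the call
  }
}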
Use of org.apache.druid.client.cache.ForegroundCachePopulator in project druid by druid-io.
From the class ServerManagerTest, method setUp:
@Before
public void setUp() {
  EmittingLogger.registerEmitter(new NoopServiceEmitter());
  queryWaitLatch = new CountDownLatch(1);
  queryWaitYieldLatch = new CountDownLatch(1);
  queryNotifyLatch = new CountDownLatch(1);
  factory = new MyQueryRunnerFactory(queryWaitLatch, queryWaitYieldLatch, queryNotifyLatch);
  serverManagerExec = Executors.newFixedThreadPool(2);
  segmentManager = new SegmentManager(new SegmentLoader() {
    @Override
    public ReferenceCountingSegment getSegment(final DataSegment segment, boolean lazy, SegmentLazyLoadFailCallback loadFailCallback) {
      return ReferenceCountingSegment.wrapSegment(
          new SegmentForTesting(
              MapUtils.getString(segment.getLoadSpec(), "version"),
              (Interval) segment.getLoadSpec().get("interval")),
          segment.getShardSpec());
    }

    @Override
    public void cleanup(DataSegment segment) {
    }
  });
  serverManager = new ServerManager(
      new QueryRunnerFactoryConglomerate() {
        @Override
        public <T, QueryType extends Query<T>> QueryRunnerFactory<T, QueryType> findFactory(QueryType query) {
          if (query instanceof SearchQuery) {
            return (QueryRunnerFactory) factory;
          } else {
            return null;
          }
        }
      },
      new NoopServiceEmitter(),
      new ForwardingQueryProcessingPool(serverManagerExec),
      new ForegroundCachePopulator(new DefaultObjectMapper(), new CachePopulatorStats(), -1),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig(),
      segmentManager,
      NoopJoinableFactory.INSTANCE,
      new ServerConfig());
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-01"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-02"));
  loadQueryable("test", "2", Intervals.of("P1d/2011-04-02"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-03"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-04"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-05"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T01"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T02"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T03"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T05"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T06"));
  loadQueryable("test2", "1", Intervals.of("P1d/2011-04-01"));
  loadQueryable("test2", "1", Intervals.of("P1d/2011-04-02"));
}
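The three CountDownLatch fields handed to MyQueryRunnerFactory above implement a handshake: the test parks a running query on a latch, makes assertions while it is blocked, then releases it. A generic sketch of that coordination (the thread and naming here are illustrative, not MyQueryRunnerFactory's actual internals):

import java.util.concurrent.CountDownLatch;

public class LatchHandshakeSketch {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch queryWaitLatch = new CountDownLatch(1);

    Thread query = new Thread(() -> {
      try {
        queryWaitLatch.await();  // the "query" blocks until the test releases it
        System.out.println("query proceeded");
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    query.start();

    // ...the test would assert on intermediate state here, while the query is parked...

    queryWaitLatch.countDown();  // release the query
    query.join();
  }
}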
Use of org.apache.druid.client.cache.ForegroundCachePopulator in project druid by druid-io.
From the class MovingAverageQueryTest, method testQuery:
/**
* Validate that the specified query behaves correctly.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testQuery() throws IOException {
Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
Assert.assertNotNull(expectedResults);
Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));
CachingClusteredClient baseClient = new CachingClusteredClient(warehouse, new TimelineServerView() {
@Override
public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis) {
return Optional.empty();
}
@Override
public List<ImmutableDruidServer> getDruidServers() {
return null;
}
@Override
public <T> QueryRunner<T> getQueryRunner(DruidServer server) {
return null;
}
@Override
public void registerTimelineCallback(Executor exec, TimelineCallback callback) {
}
@Override
public void registerSegmentCallback(Executor exec, SegmentCallback callback) {
}
@Override
public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback) {
}
}, MapCache.create(100000), jsonMapper, new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1), new CacheConfig(), new DruidHttpClientConfig() {
@Override
public long getMaxQueuedBytes() {
return 0L;
}
}, new DruidProcessingConfig() {
@Override
public String getFormatString() {
return null;
}
}, ForkJoinPool.commonPool(), QueryStackTests.DEFAULT_NOOP_SCHEDULER, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), new NoopServiceEmitter());
ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(new ServiceEmitter("", "", null) {
@Override
public void emit(Event event) {
}
}, baseClient, null, /* local client; unused in this test, so pass in null */
warehouse, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), retryConfig, jsonMapper, serverConfig, null, new CacheConfig());
defineMocks();
QueryPlus queryPlus = QueryPlus.wrap(query);
final Sequence<?> res = query.getRunner(walker).run(queryPlus);
List actualResults = new ArrayList();
actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());
expectedResults = consistentTypeCasting(expectedResults);
actualResults = consistentTypeCasting(actualResults);
Assert.assertEquals(expectedResults, actualResults);
}
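The res.accumulate(actualResults, Accumulators.list()) call above drains a lazy Druid Sequence into a List. A small sketch of the same left fold written with a plain lambda instead of the Accumulators helper (assuming Druid's Accumulator is usable as a functional interface, which its single accumulate method suggests):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;

public class AccumulateSketch {
  public static void main(String[] args) {
    Sequence<Integer> seq = Sequences.simple(Arrays.asList(1, 2, 3));

    // accumulate folds the sequence left-to-right into the initial value,
    // closing any underlying resources once the sequence is exhausted.
    List<Integer> out = seq.accumulate(new ArrayList<Integer>(), (acc, value) -> {
      acc.add(value);
      return acc;
    });

    System.out.println(out);  // [1, 2, 3]
  }
}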