Use of org.apache.druid.client.cache.Cache in project druid by druid-io.
In class CachingClusteredClientTest, method testCachingOverBulkLimitEnforcesLimit:
@Test
@SuppressWarnings("unchecked")
public void testCachingOverBulkLimitEnforcesLimit()
{
  final int limit = 10;
  final Interval interval = Intervals.of("2011-01-01/2011-01-02");
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                      .dataSource(DATA_SOURCE)
                                      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
                                      .filters(DIM_FILTER)
                                      .granularity(GRANULARITY)
                                      .aggregators(AGGS)
                                      .postAggregators(POST_AGGS)
                                      .context(CONTEXT)
                                      .randomQueryId()
                                      .build();
  final ResponseContext context = initializeResponseContext();

  final Cache cache = EasyMock.createStrictMock(Cache.class);
  final Capture<Iterable<Cache.NamedKey>> cacheKeyCapture = EasyMock.newCapture();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture)))
          .andReturn(ImmutableMap.of())
          .once();
  EasyMock.replay(cache);
  client = makeClient(new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), -1), cache, limit);

  final DruidServer lastServer = servers[random.nextInt(servers.length)];
  final DataSegment dataSegment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.expect(dataSegment.getId()).andReturn(SegmentId.dummy(DATA_SOURCE)).anyTimes();
  EasyMock.replay(dataSegment);
  final ServerSelector selector = new ServerSelector(
      dataSegment,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
  );
  selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), dataSegment);
  timeline.add(interval, "v", new SingleElementPartitionChunk<>(selector));

  // First run: with a positive limit, the number of keys passed to getBulk must not exceed it.
  getDefaultQueryRunner().run(QueryPlus.wrap(query), context);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache key below limit", ImmutableList.copyOf(cacheKeyCapture.getValue()).size() <= limit);
  EasyMock.verify(cache);

  // Second run: with a limit of 0, getBulk must receive no keys at all.
  EasyMock.reset(cache);
  cacheKeyCapture.reset();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture)))
          .andReturn(ImmutableMap.of())
          .once();
  EasyMock.replay(cache);
  client = makeClient(new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), -1), cache, 0);
  getDefaultQueryRunner().run(QueryPlus.wrap(query), context);
  EasyMock.verify(cache);
  EasyMock.verify(dataSegment);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache Keys empty", ImmutableList.copyOf(cacheKeyCapture.getValue()).isEmpty());
}
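The two runs above pin down the contract of the bulk limit passed to makeClient: at most limit keys may reach Cache.getBulk, and a limit of 0 means the bulk call carries no keys at all. A minimal, self-contained sketch of that contract, under invented names (SimpleCache and fetchWithBulkLimit are not Druid APIs):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the bulk-limit contract asserted above;
// this is not Druid's actual implementation.
public class BulkLimitSketch
{
  interface SimpleCache
  {
    Map<String, byte[]> getBulk(List<String> keys);
  }

  static Map<String, byte[]> fetchWithBulkLimit(SimpleCache cache, List<String> keys, int limit)
  {
    if (limit <= 0) {
      // A limit of 0 still issues the bulk call, but with an empty key list,
      // matching the second half of the test.
      return cache.getBulk(new ArrayList<>());
    }
    // Never hand more than "limit" keys to a single getBulk call.
    return cache.getBulk(keys.subList(0, Math.min(limit, keys.size())));
  }
}

Keys beyond the limit would then have to be fetched individually or skipped; the test only constrains what a single getBulk call may see.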
Use of org.apache.druid.client.cache.Cache in project druid by druid-io.
In class CachingQueryRunnerTest, method testNullCacheKeyPrefix:
@Test
public void testNullCacheKeyPrefix()
{
  Query query = new TopNQueryBuilder()
      .dataSource("ds")
      .dimension("top_dim")
      .metric("imps")
      .threshold(3)
      .intervals("2011-01-05/2011-01-10")
      .aggregators(AGGS)
      .granularity(Granularities.ALL)
      .build();
  QueryToolChest toolchest = new TopNQueryQueryToolChest(new TopNQueryConfig());
  Cache cache = EasyMock.mock(Cache.class);
  EasyMock.replay(cache);
  CachingQueryRunner queryRunner = makeCachingQueryRunner(null, cache, toolchest, Sequences.empty());

  // A null cache key prefix disables both cache population and cache reads.
  Assert.assertFalse(queryRunner.canPopulateCache(query, toolchest.getCacheStrategy(query)));
  Assert.assertFalse(queryRunner.canUseCache(query, toolchest.getCacheStrategy(query)));
  queryRunner.run(QueryPlus.wrap(query));
  EasyMock.verifyUnexpectedCalls(cache);
}
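The prefix matters because it is concatenated into the segment-level cache key (see testCloseAndPopulate below, where the key is built from keyPrefix plus the strategy's query key); with no prefix there is no stable key to read or write. A hypothetical guard capturing the assertions above (CacheGate is an invented name, not a Druid class):

final class CacheGate
{
  private final byte[] cacheKeyPrefix;  // null when the segment has no usable prefix

  CacheGate(byte[] cacheKeyPrefix)
  {
    this.cacheKeyPrefix = cacheKeyPrefix;
  }

  // Without a prefix, no stable segment-level key can be formed, so both
  // directions of caching must be refused and the cache never touched.
  boolean canPopulateCache()
  {
    return cacheKeyPrefix != null;
  }

  boolean canUseCache()
  {
    return cacheKeyPrefix != null;
  }
}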
Use of org.apache.druid.client.cache.Cache in project druid by druid-io.
In class CachingQueryRunnerTest, method testCloseAndPopulate:
private void testCloseAndPopulate(
    List<Result> expectedRes,
    List<Result> expectedCacheRes,
    Query query,
    QueryToolChest toolchest
) throws Exception
{
  final AssertingClosable closable = new AssertingClosable();
  final Sequence resultSeq = Sequences.wrap(
      Sequences.simple(expectedRes),
      new SequenceWrapper()
      {
        @Override
        public void before()
        {
          Assert.assertFalse(closable.isClosed());
        }

        @Override
        public void after(boolean isDone, Throwable thrown)
        {
          closable.close();
        }
      }
  );

  final CountDownLatch cacheMustBePutOnce = new CountDownLatch(1);
  Cache cache = new Cache()
  {
    private final ConcurrentMap<NamedKey, byte[]> baseMap = new ConcurrentHashMap<>();

    @Override
    public byte[] get(NamedKey key)
    {
      return baseMap.get(key);
    }

    @Override
    public void put(NamedKey key, byte[] value)
    {
      baseMap.put(key, value);
      cacheMustBePutOnce.countDown();
    }

    @Override
    public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys)
    {
      return null;
    }

    @Override
    public void close(String namespace)
    {
    }

    @Override
    public void close()
    {
    }

    @Override
    public CacheStats getStats()
    {
      return null;
    }

    @Override
    public boolean isLocal()
    {
      return true;
    }

    @Override
    public void doMonitor(ServiceEmitter emitter)
    {
    }
  };

  byte[] keyPrefix = RandomUtils.nextBytes(10);
  CachingQueryRunner runner = makeCachingQueryRunner(keyPrefix, cache, toolchest, resultSeq);
  CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
  Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(
      CACHE_ID,
      SEGMENT_DESCRIPTOR,
      Bytes.concat(keyPrefix, cacheStrategy.computeCacheKey(query))
  );
  Assert.assertTrue(runner.canPopulateCache(query, cacheStrategy));
  Sequence res = runner.run(QueryPlus.wrap(query));

  // Before the sequence is consumed, the base sequence is still open and nothing has been cached.
  Assert.assertFalse("sequence must not be closed", closable.isClosed());
  Assert.assertNull("cache must be empty", cache.get(cacheKey));

  List results = res.toList();
  Assert.assertTrue(closable.isClosed());
  Assert.assertEquals(expectedRes.toString(), results.toString());

  // Cache population happens in the background after consumption; wait at most
  // 10 seconds so a failure here cannot block the overall test run.
  Assert.assertTrue("cache must be populated", cacheMustBePutOnce.await(10, TimeUnit.SECONDS));
  byte[] cacheValue = cache.get(cacheKey);
  Assert.assertNotNull(cacheValue);
  Function<Object, Result> fn = cacheStrategy.pullFromSegmentLevelCache();
  List<Result> cacheResults = Lists.newArrayList(
      Iterators.transform(
          objectMapper.readValues(objectMapper.getFactory().createParser(cacheValue), cacheStrategy.getCacheObjectClazz()),
          fn
      )
  );
  Assert.assertEquals(expectedCacheRes.toString(), cacheResults.toString());
}
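The ordering this helper locks in — nothing closed and nothing cached until the sequence is consumed, then population in the background — can be pictured with a plain iterator that buffers results as they stream past and hands them to the cache only on exhaustion. A sketch under invented names (PopulatingIterator is not a Druid class):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

// Minimal sketch of populate-on-consumption; illustrative only.
final class PopulatingIterator<T> implements Iterator<T>
{
  private final Iterator<T> delegate;
  private final Consumer<List<T>> cacheWriter;
  private final List<T> buffer = new ArrayList<>();
  private boolean populated = false;

  PopulatingIterator(Iterator<T> delegate, Consumer<List<T>> cacheWriter)
  {
    this.delegate = delegate;
    this.cacheWriter = cacheWriter;
  }

  @Override
  public boolean hasNext()
  {
    if (delegate.hasNext()) {
      return true;
    }
    if (!populated) {
      // The cache is written only once the underlying results are exhausted,
      // which is why the test sees an empty cache before res.toList() runs.
      populated = true;
      cacheWriter.accept(buffer);
    }
    return false;
  }

  @Override
  public T next()
  {
    T value = delegate.next();
    buffer.add(value);
    return value;
  }
}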
Use of org.apache.druid.client.cache.Cache in project druid by druid-io.
In class CachingQueryRunnerTest, method testNullStrategy:
@Test
public void testNullStrategy()
{
  Query query = new TopNQueryBuilder()
      .dataSource("ds")
      .dimension("top_dim")
      .metric("imps")
      .threshold(3)
      .intervals("2011-01-05/2011-01-10")
      .aggregators(AGGS)
      .granularity(Granularities.ALL)
      .build();
  QueryToolChest toolchest = EasyMock.mock(QueryToolChest.class);
  Cache cache = EasyMock.mock(Cache.class);
  EasyMock.expect(toolchest.getCacheStrategy(query)).andReturn(null);
  EasyMock.replay(cache, toolchest);
  CachingQueryRunner queryRunner = makeCachingQueryRunner(new byte[0], cache, toolchest, Sequences.empty());
  Assert.assertFalse(queryRunner.canPopulateCache(query, null));
  Assert.assertFalse(queryRunner.canUseCache(query, null));
  queryRunner.run(QueryPlus.wrap(query));
  EasyMock.verifyUnexpectedCalls(cache);
}
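Both null-guard tests lean on the same EasyMock idiom: the cache mock is replayed with zero expectations, so a passing verifyUnexpectedCalls proves the cache was never touched. A compact, self-contained illustration of that idiom (assuming EasyMock 4.1 or later, where verifyUnexpectedCalls was introduced; SimpleCache is an invented interface):

import org.easymock.EasyMock;

// Sketch of the "mock must stay untouched" verification used above.
public class UntouchedMockExample
{
  interface SimpleCache
  {
    byte[] get(String key);
  }

  public static void main(String[] args)
  {
    SimpleCache cache = EasyMock.mock(SimpleCache.class);
    EasyMock.replay(cache);  // replayed with no recorded expectations

    // ... exercise code that must NOT call the cache ...

    // Passes only if no unexpected call (here: any call at all) reached the mock.
    EasyMock.verifyUnexpectedCalls(cache);
  }
}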
Use of org.apache.druid.client.cache.Cache in project druid by druid-io.
In class ServerManager, method buildAndDecorateQueryRunner:
private <T> QueryRunner<T> buildAndDecorateQueryRunner(
    final QueryRunnerFactory<T, Query<T>> factory, final QueryToolChest<T, Query<T>> toolChest,
    final SegmentReference segment, final Optional<byte[]> cacheKeyPrefix,
    final SegmentDescriptor segmentDescriptor, final AtomicLong cpuTimeAccumulator
)
{
  final SpecificSegmentSpec segmentSpec = new SpecificSegmentSpec(segmentDescriptor);
  final SegmentId segmentId = segment.getId();
  final Interval segmentInterval = segment.getDataInterval();
  // If the segment is closed after this line, ReferenceCountingSegmentQueryRunner will handle it and do the right thing.
  if (segmentId == null || segmentInterval == null) {
    return new ReportTimelineMissingSegmentQueryRunner<>(segmentDescriptor);
  }
  String segmentIdString = segmentId.toString();

  // Decorate the base runner innermost to outermost, one concern per wrapper.
  MetricsEmittingQueryRunner<T> metricsEmittingQueryRunnerInner = new MetricsEmittingQueryRunner<>(
      emitter, toolChest, new ReferenceCountingSegmentQueryRunner<>(factory, segment, segmentDescriptor),
      QueryMetrics::reportSegmentTime, queryMetrics -> queryMetrics.segment(segmentIdString)
  );
  StorageAdapter storageAdapter = segment.asStorageAdapter();
  long segmentMaxTime = storageAdapter.getMaxTime().getMillis();
  long segmentMinTime = storageAdapter.getMinTime().getMillis();
  Interval actualDataInterval = Intervals.utc(segmentMinTime, segmentMaxTime + 1);
  CachingQueryRunner<T> cachingQueryRunner = new CachingQueryRunner<>(
      segmentIdString, cacheKeyPrefix, segmentDescriptor, actualDataInterval,
      objectMapper, cache, toolChest, metricsEmittingQueryRunnerInner, cachePopulator, cacheConfig
  );
  BySegmentQueryRunner<T> bySegmentQueryRunner =
      new BySegmentQueryRunner<>(segmentId, segmentInterval.getStart(), cachingQueryRunner);
  MetricsEmittingQueryRunner<T> metricsEmittingQueryRunnerOuter = new MetricsEmittingQueryRunner<>(
      emitter, toolChest, bySegmentQueryRunner,
      QueryMetrics::reportSegmentAndCacheTime, queryMetrics -> queryMetrics.segment(segmentIdString)
  ).withWaitMeasuredFromNow();
  SpecificSegmentQueryRunner<T> specificSegmentQueryRunner =
      new SpecificSegmentQueryRunner<>(metricsEmittingQueryRunnerOuter, segmentSpec);
  PerSegmentOptimizingQueryRunner<T> perSegmentOptimizingQueryRunner = new PerSegmentOptimizingQueryRunner<>(
      specificSegmentQueryRunner, new PerSegmentQueryOptimizationContext(segmentDescriptor)
  );
  return new SetAndVerifyContextQueryRunner<>(
      serverConfig,
      CPUTimeMetricQueryRunner.safeBuild(perSegmentOptimizingQueryRunner, toolChest, emitter, cpuTimeAccumulator, false)
  );
}
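buildAndDecorateQueryRunner is a textbook decorator stack: each QueryRunner wraps the previous one and adds exactly one concern (reference counting, caching, per-segment metrics, segment-spec verification, CPU accounting). A generic, self-contained sketch of that pattern with invented stand-in types (Runner, BaseRunner, TimingRunner are not Druid classes):

import java.util.Collections;
import java.util.List;

// Generic decorator sketch mirroring the wrapping order above; all names here
// are illustrative stand-ins, not Druid types.
public class DecoratorChainSketch
{
  interface Runner
  {
    List<String> run(String query);
  }

  static final class BaseRunner implements Runner
  {
    @Override
    public List<String> run(String query)
    {
      return Collections.singletonList("result for " + query);
    }
  }

  static final class TimingRunner implements Runner
  {
    private final Runner delegate;

    TimingRunner(Runner delegate)
    {
      this.delegate = delegate;
    }

    @Override
    public List<String> run(String query)
    {
      long start = System.nanoTime();
      try {
        return delegate.run(query);
      }
      finally {
        // Exactly one concern per wrapper; here, timing around the delegate.
        System.out.println("took " + (System.nanoTime() - start) + " ns");
      }
    }
  }

  public static void main(String[] args)
  {
    // Build innermost first, then wrap, just as the Druid method does.
    Runner runner = new TimingRunner(new BaseRunner());
    System.out.println(runner.run("select *"));
  }
}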