Search in sources :

Example 1 with Cache

Use of io.druid.client.cache.Cache in project druid by druid-io.

From the class CachingClusteredClient, method run:

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);
    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();
    final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();
    final boolean useCache = CacheUtil.useCacheOnBrokers(query, strategy, cacheConfig);
    final boolean populateCache = CacheUtil.populateCacheOnBrokers(query, strategy, cacheConfig);
    final boolean isBySegment = BaseQuery.getContextBySegment(query, false);
    final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();
    final int priority = BaseQuery.getContextPriority(query, 0);
    contextBuilder.put("priority", priority);
    if (populateCache) {
        // prevent down-stream nodes from caching results as well if we are populating the cache
        contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
        contextBuilder.put("bySegment", true);
    }
    TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
        return Sequences.empty();
    }
    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();
    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();
    // Note that enabling this leads to putting uncovered intervals information in the response headers
    // and might blow up in some cases https://github.com/druid-io/druid/issues/2108
    int uncoveredIntervalsLimit = BaseQuery.getContextUncoveredIntervalsLimit(query, 0);
    if (uncoveredIntervalsLimit > 0) {
        List<Interval> uncoveredIntervals = Lists.newArrayListWithCapacity(uncoveredIntervalsLimit);
        boolean uncoveredIntervalsOverflowed = false;
        for (Interval interval : query.getIntervals()) {
            Iterable<TimelineObjectHolder<String, ServerSelector>> lookup = timeline.lookup(interval);
            long startMillis = interval.getStartMillis();
            long endMillis = interval.getEndMillis();
            for (TimelineObjectHolder<String, ServerSelector> holder : lookup) {
                Interval holderInterval = holder.getInterval();
                long intervalStart = holderInterval.getStartMillis();
                if (!uncoveredIntervalsOverflowed && startMillis != intervalStart) {
                    if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                        uncoveredIntervals.add(new Interval(startMillis, intervalStart));
                    } else {
                        uncoveredIntervalsOverflowed = true;
                    }
                }
                startMillis = holderInterval.getEndMillis();
                serversLookup.add(holder);
            }
            if (!uncoveredIntervalsOverflowed && startMillis < endMillis) {
                if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                    uncoveredIntervals.add(new Interval(startMillis, endMillis));
                } else {
                    uncoveredIntervalsOverflowed = true;
                }
            }
        }
        if (!uncoveredIntervals.isEmpty()) {
            // This returns intervals for which NO segment is present.
            // Which is not necessarily an indication that the data doesn't exist or is
            // incomplete. The data could exist and just not be loaded yet.  In either
            // case, though, this query will not include any data from the identified intervals.
            responseContext.put("uncoveredIntervals", uncoveredIntervals);
            responseContext.put("uncoveredIntervalsOverflowed", uncoveredIntervalsOverflowed);
        }
    } else {
        for (Interval interval : query.getIntervals()) {
            Iterables.addAll(serversLookup, timeline.lookup(interval));
        }
    }
    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup = toolChest.filterSegments(query, serversLookup);
    Map<String, Optional<RangeSet<String>>> dimensionRangeCache = Maps.newHashMap();
    // Filter unneeded chunks based on partition dimension
    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
        final Set<PartitionChunk<ServerSelector>> filteredChunks = DimFilterUtils.filterShards(query.getFilter(), holder.getObject(), new Function<PartitionChunk<ServerSelector>, ShardSpec>() {

            @Override
            public ShardSpec apply(PartitionChunk<ServerSelector> input) {
                return input.getObject().getSegment().getShardSpec();
            }
        }, dimensionRangeCache);
        for (PartitionChunk<ServerSelector> chunk : filteredChunks) {
            ServerSelector selector = chunk.getObject();
            final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
            segments.add(Pair.of(selector, descriptor));
        }
    }
    final byte[] queryCacheKey;
    // (populateCache || useCache) implies strategy != null; explicit bySegment queries are never cached
    if ((populateCache || useCache) && !isBySegment) {
        queryCacheKey = strategy.computeCacheKey(query);
    } else {
        queryCacheKey = null;
    }
    if (query.getContext().get(QueryResource.HDR_IF_NONE_MATCH) != null) {
        String prevEtag = (String) query.getContext().get(QueryResource.HDR_IF_NONE_MATCH);
        //compute current Etag
        Hasher hasher = Hashing.sha1().newHasher();
        boolean hasOnlyHistoricalSegments = true;
        for (Pair<ServerSelector, SegmentDescriptor> p : segments) {
            if (!p.lhs.pick().getServer().isAssignable()) {
                hasOnlyHistoricalSegments = false;
                break;
            }
            hasher.putString(p.lhs.getSegment().getIdentifier(), Charsets.UTF_8);
        }
        if (hasOnlyHistoricalSegments) {
            hasher.putBytes(queryCacheKey == null ? strategy.computeCacheKey(query) : queryCacheKey);
            String currEtag = Base64.encodeBase64String(hasher.hash().asBytes());
            responseContext.put(QueryResource.HDR_ETAG, currEtag);
            if (prevEtag.equals(currEtag)) {
                return Sequences.empty();
            }
        }
    }
    if (queryCacheKey != null) {
        // cacheKeys map must preserve segment ordering, in order for shards to always be combined in the same order
        Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newLinkedHashMap();
        for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
            final Cache.NamedKey segmentCacheKey = CacheUtil.computeSegmentCacheKey(segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
            cacheKeys.put(segment, segmentCacheKey);
        }
        // Pull cached segments from cache and remove from set of segments to query
        final Map<Cache.NamedKey, byte[]> cachedValues;
        if (useCache) {
            cachedValues = cache.getBulk(Iterables.limit(cacheKeys.values(), cacheConfig.getCacheBulkMergeLimit()));
        } else {
            cachedValues = ImmutableMap.of();
        }
        for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
            Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
            Cache.NamedKey segmentCacheKey = entry.getValue();
            final Interval segmentQueryInterval = segment.rhs.getInterval();
            final byte[] cachedValue = cachedValues.get(segmentCacheKey);
            if (cachedValue != null) {
                // remove cached segment from set of segments to query
                segments.remove(segment);
                cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
            } else if (populateCache) {
                // otherwise, if populating cache, add segment to list of segments to cache
                final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
                cachePopulatorMap.put(String.format("%s_%s", segmentIdentifier, segmentQueryInterval), new CachePopulator(cache, objectMapper, segmentCacheKey));
            }
        }
    }
    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final QueryableDruidServer queryableDruidServer = segment.lhs.pick();
        if (queryableDruidServer == null) {
            log.makeAlert("No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!", segment.rhs, query.getDataSource()).emit();
        } else {
            final DruidServer server = queryableDruidServer.getServer();
            List<SegmentDescriptor> descriptors = serverSegments.get(server);
            if (descriptors == null) {
                descriptors = Lists.newArrayList();
                serverSegments.put(server, descriptors);
            }
            descriptors.add(segment.rhs);
        }
    }
    return new LazySequence<>(new Supplier<Sequence<T>>() {

        @Override
        public Sequence<T> get() {
            ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
            addSequencesFromCache(sequencesByInterval);
            addSequencesFromServer(sequencesByInterval);
            return mergeCachedAndUncachedSequences(query, sequencesByInterval);
        }

        private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
            if (strategy == null) {
                return;
            }
            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
                final byte[] cachedResult = cachedResultPair.rhs;
                Sequence<Object> cachedSequence = new BaseSequence<>(new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {

                    @Override
                    public Iterator<Object> make() {
                        try {
                            if (cachedResult.length == 0) {
                                return Iterators.emptyIterator();
                            }
                            return objectMapper.readValues(objectMapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                        } catch (IOException e) {
                            throw Throwables.propagate(e);
                        }
                    }

                    @Override
                    public void cleanup(Iterator<Object> iterFromMake) {
                    }
                });
                listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
            }
        }

        private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
            listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());
            final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());
            // The data gets handled as a Future and parsed in the long Sequence chain in the resultSeqToAdd setter.
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry : serverSegments.entrySet()) {
                final DruidServer server = entry.getKey();
                final List<SegmentDescriptor> descriptors = entry.getValue();
                final QueryRunner clientQueryable = serverView.getQueryRunner(server);
                if (clientQueryable == null) {
                    log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
                    continue;
                }
                final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors);
                final Sequence<T> resultSeqToAdd;
                if (!server.isAssignable() || !populateCache || isBySegment) {
                    // Direct server queryable
                    if (!isBySegment) {
                        resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec), responseContext);
                    } else {
                        // bySegment queries need to be de-serialized, see DirectDruidClient.run()
                        @SuppressWarnings("unchecked") final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery = (Query<Result<BySegmentResultValueClass<T>>>) ((Query) query);
                        @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence = clientQueryable.run(bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                        resultSeqToAdd = (Sequence) Sequences.map(resultSequence, new Function<Result<BySegmentResultValueClass<T>>, Result<BySegmentResultValueClass<T>>>() {

                            @Override
                            public Result<BySegmentResultValueClass<T>> apply(Result<BySegmentResultValueClass<T>> input) {
                                final BySegmentResultValueClass<T> bySegmentValue = input.getValue();
                                return new Result<>(input.getTimestamp(), new BySegmentResultValueClass<T>(Lists.transform(bySegmentValue.getResults(), toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())), bySegmentValue.getSegmentId(), bySegmentValue.getInterval()));
                            }
                        });
                    }
                } else {
                    // Requires some manipulation on broker side
                    @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence = clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                    resultSeqToAdd = new MergeSequence(query.getResultOrdering(), Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(runningSequence, new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {

                        private final Function<T, Object> cacheFn = strategy.prepareForCache();

                        // Actually do something with the results
                        @Override
                        public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
                            final BySegmentResultValueClass<T> value = input.getValue();
                            final CachePopulator cachePopulator = cachePopulatorMap.get(String.format("%s_%s", value.getSegmentId(), value.getInterval()));
                            final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();
                            return Sequences.<T>withEffect(Sequences.<T, T>map(Sequences.<T, T>map(Sequences.<T>simple(value.getResults()), new Function<T, T>() {

                                @Override
                                public T apply(final T input) {
                                    if (cachePopulator != null) {
                                        // only compute cache data if populating cache
                                        cacheFutures.add(backgroundExecutorService.submit(new Callable<Object>() {

                                            @Override
                                            public Object call() {
                                                return cacheFn.apply(input);
                                            }
                                        }));
                                    }
                                    return input;
                                }
                            }), toolChest.makePreComputeManipulatorFn(
                                // This casting is sub-optimal, but hasn't caused any major problems yet...
                                (Query) rewrittenQuery, MetricManipulatorFns.deserializing())), new Runnable() {

                                @Override
                                public void run() {
                                    if (cachePopulator != null) {
                                        Futures.addCallback(Futures.allAsList(cacheFutures), new FutureCallback<List<Object>>() {

                                            @Override
                                            public void onSuccess(List<Object> cacheData) {
                                                cachePopulator.populate(cacheData);
                                                // Help out GC by making sure all references are gone
                                                cacheFutures.clear();
                                            }

                                            @Override
                                            public void onFailure(Throwable throwable) {
                                                log.error(throwable, "Background caching failed");
                                            }
                                        }, backgroundExecutorService);
                                    }
                                }
                            }, MoreExecutors.sameThreadExecutor());
                        // End withEffect
                        }
                    }));
                }
                listOfSequences.add(resultSeqToAdd);
            }
        }
    });
}
Also used : BaseQuery(io.druid.query.BaseQuery) Query(io.druid.query.Query) ArrayList(java.util.ArrayList) ShardSpec(io.druid.timeline.partition.ShardSpec) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) Result(io.druid.query.Result) ServerSelector(io.druid.client.selector.ServerSelector) List(java.util.List) ArrayList(java.util.ArrayList) FutureCallback(com.google.common.util.concurrent.FutureCallback) Optional(com.google.common.base.Optional) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass) LazySequence(io.druid.java.util.common.guava.LazySequence) BaseSequence(io.druid.java.util.common.guava.BaseSequence) Sequence(io.druid.java.util.common.guava.Sequence) MergeSequence(io.druid.java.util.common.guava.MergeSequence) ImmutableMap(com.google.common.collect.ImmutableMap) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) Hasher(com.google.common.hash.Hasher) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Interval(org.joda.time.Interval) Cache(io.druid.client.cache.Cache) MultipleSpecificSegmentSpec(io.druid.query.spec.MultipleSpecificSegmentSpec) Function(com.google.common.base.Function) MergeSequence(io.druid.java.util.common.guava.MergeSequence) SegmentDescriptor(io.druid.query.SegmentDescriptor) Iterator(java.util.Iterator) PartitionChunk(io.druid.timeline.partition.PartitionChunk) TypeReference(com.fasterxml.jackson.core.type.TypeReference) Pair(io.druid.java.util.common.Pair) IOException(java.io.IOException) QueryRunner(io.druid.query.QueryRunner) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) LazySequence(io.druid.java.util.common.guava.LazySequence)
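
For reference, the If-None-Match handling in the method above reduces to a small hashing routine. The sketch below isolates it under the assumption that the caller has already collected the segment identifiers and the query cache key; the class and method names are made up for illustration, while the Hashing, Charsets and Base64 calls mirror the ones used above.

import com.google.common.base.Charsets;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import org.apache.commons.codec.binary.Base64;

import java.util.List;

public class QueryEtagSketch {

    // Hash every segment identifier plus the query cache key, as the hasOnlyHistoricalSegments
    // branch above does, and encode the digest the same way it is placed into the ETag header.
    public static String computeEtag(List<String> segmentIdentifiers, byte[] queryCacheKey) {
        Hasher hasher = Hashing.sha1().newHasher();
        for (String segmentId : segmentIdentifiers) {
            hasher.putString(segmentId, Charsets.UTF_8);
        }
        hasher.putBytes(queryCacheKey);
        return Base64.encodeBase64String(hasher.hash().asBytes());
    }
}

If the digest equals the If-None-Match value the client sent, the broker can short-circuit with an empty sequence, which is what the early return in run() does.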

Example 2 with Cache

Use of io.druid.client.cache.Cache in project druid by druid-io.

From the class CachingQueryRunner, method run:

@Override
public Sequence<T> run(Query<T> query, Map<String, Object> responseContext) {
    final CacheStrategy strategy = toolChest.getCacheStrategy(query);
    final boolean populateCache = CacheUtil.populateCacheOnDataNodes(query, strategy, cacheConfig);
    final boolean useCache = CacheUtil.useCacheOnDataNodes(query, strategy, cacheConfig);
    final Cache.NamedKey key;
    if (strategy != null && (useCache || populateCache)) {
        key = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor, strategy.computeCacheKey(query));
    } else {
        key = null;
    }
    if (useCache) {
        final Function cacheFn = strategy.pullFromCache();
        final byte[] cachedResult = cache.get(key);
        if (cachedResult != null) {
            final TypeReference cacheObjectClazz = strategy.getCacheObjectClazz();
            return Sequences.map(new BaseSequence<>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {

                @Override
                public Iterator<T> make() {
                    try {
                        if (cachedResult.length == 0) {
                            return Iterators.emptyIterator();
                        }
                        return mapper.readValues(mapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                    } catch (IOException e) {
                        throw Throwables.propagate(e);
                    }
                }

                @Override
                public void cleanup(Iterator<T> iterFromMake) {
                }
            }), cacheFn);
        }
    }
    final Collection<ListenableFuture<?>> cacheFutures = Collections.synchronizedList(Lists.<ListenableFuture<?>>newLinkedList());
    if (populateCache) {
        final Function cacheFn = strategy.prepareForCache();
        return Sequences.withEffect(Sequences.map(base.run(query, responseContext), new Function<T, T>() {

            @Override
            public T apply(final T input) {
                final SettableFuture<Object> future = SettableFuture.create();
                cacheFutures.add(future);
                backgroundExecutorService.submit(new Runnable() {

                    @Override
                    public void run() {
                        try {
                            future.set(cacheFn.apply(input));
                        } catch (Exception e) {
                            // if there is an exception, set it on the future so the caching step is aborted
                            future.setException(e);
                        }
                    }
                });
                return input;
            }
        }), new Runnable() {

            @Override
            public void run() {
                try {
                    CacheUtil.populate(cache, mapper, key, Futures.allAsList(cacheFutures).get());
                } catch (Exception e) {
                    log.error(e, "Error while getting future for cache task");
                    throw Throwables.propagate(e);
                }
            }
        }, backgroundExecutorService);
    } else {
        return base.run(query, responseContext);
    }
}
Also used : IOException(java.io.IOException) IOException(java.io.IOException) Function(com.google.common.base.Function) Iterator(java.util.Iterator) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) TypeReference(com.fasterxml.jackson.core.type.TypeReference) CacheStrategy(io.druid.query.CacheStrategy) Cache(io.druid.client.cache.Cache)
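
The read path above (parse the cached byte[] with the strategy's cache object type, then map each entry back through pullFromCache) can be condensed into a small helper. This is only a sketch: the class name and method signature are invented, while the mapper and strategy calls are the same ones the runner uses.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import io.druid.query.CacheStrategy;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

public class CacheReadSketch {

    // An empty cached value yields no rows; otherwise the bytes are parsed with the strategy's
    // TypeReference and every cached object is mapped back to a result row via pullFromCache().
    @SuppressWarnings("unchecked")
    public static <T> List<T> readCachedResults(ObjectMapper mapper, CacheStrategy strategy, byte[] cachedResult)
            throws IOException {
        if (cachedResult == null || cachedResult.length == 0) {
            return ImmutableList.of();
        }
        Iterator<Object> cachedObjects =
                mapper.readValues(mapper.getFactory().createParser(cachedResult), strategy.getCacheObjectClazz());
        Function<Object, T> pullFromCache = strategy.pullFromCache();
        return ImmutableList.copyOf(Iterators.transform(cachedObjects, pullFromCache));
    }
}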

Example 3 with Cache

Use of io.druid.client.cache.Cache in project druid by druid-io.

From the class CachingClusteredClientTest, method testCachingOverBulkLimitEnforcesLimit:

@Test
@SuppressWarnings("unchecked")
public void testCachingOverBulkLimitEnforcesLimit() throws Exception {
    final int limit = 10;
    final Interval interval = new Interval("2011-01-01/2011-01-02");
    final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource(DATA_SOURCE).intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval))).filters(DIM_FILTER).granularity(GRANULARITY).aggregators(AGGS).postAggregators(POST_AGGS).context(CONTEXT).build();
    final Map<String, Object> context = new HashMap<>();
    final Cache cache = EasyMock.createStrictMock(Cache.class);
    final Capture<Iterable<Cache.NamedKey>> cacheKeyCapture = EasyMock.newCapture();
    EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture))).andReturn(ImmutableMap.<Cache.NamedKey, byte[]>of()).once();
    EasyMock.replay(cache);
    client = makeClient(MoreExecutors.sameThreadExecutor(), cache, limit);
    final DruidServer lastServer = servers[random.nextInt(servers.length)];
    final DataSegment dataSegment = EasyMock.createNiceMock(DataSegment.class);
    EasyMock.expect(dataSegment.getIdentifier()).andReturn(DATA_SOURCE).anyTimes();
    EasyMock.replay(dataSegment);
    final ServerSelector selector = new ServerSelector(dataSegment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
    selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), dataSegment);
    timeline.add(interval, "v", new SingleElementPartitionChunk<>(selector));
    client.run(query, context);
    Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
    Assert.assertTrue("Cache key below limit", ImmutableList.copyOf(cacheKeyCapture.getValue()).size() <= limit);
    EasyMock.verify(cache);
    EasyMock.reset(cache);
    cacheKeyCapture.reset();
    EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture))).andReturn(ImmutableMap.<Cache.NamedKey, byte[]>of()).once();
    EasyMock.replay(cache);
    client = makeClient(MoreExecutors.sameThreadExecutor(), cache, 0);
    client.run(query, context);
    EasyMock.verify(cache);
    EasyMock.verify(dataSegment);
    Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
    Assert.assertTrue("Cache Keys empty", ImmutableList.copyOf(cacheKeyCapture.getValue()).isEmpty());
}
Also used : TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) MergeIterable(io.druid.java.util.common.guava.MergeIterable) FunctionalIterable(io.druid.java.util.common.guava.FunctionalIterable) HashMap(java.util.HashMap) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) DataSegment(io.druid.timeline.DataSegment) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) ServerSelector(io.druid.client.selector.ServerSelector) HighestPriorityTierSelectorStrategy(io.druid.client.selector.HighestPriorityTierSelectorStrategy) RandomServerSelectorStrategy(io.druid.client.selector.RandomServerSelectorStrategy) Interval(org.joda.time.Interval) MapCache(io.druid.client.cache.MapCache) Cache(io.druid.client.cache.Cache) Test(org.junit.Test) GroupByQueryRunnerTest(io.druid.query.groupby.GroupByQueryRunnerTest)
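
The limit handed to makeClient in this test is presumably wired into CacheConfig.getCacheBulkMergeLimit(), which is what Example 1 consults before calling cache.getBulk. A hypothetical override for tests, assuming that getter can be overridden like the isUseCache and isPopulateCache getters used elsewhere in these examples, might look like this:

import io.druid.client.cache.CacheConfig;

// Hypothetical test CacheConfig: caps the number of segment cache keys sent to the cache in a
// single getBulk call.
public class LimitedBulkCacheConfig extends CacheConfig {

    private final int bulkMergeLimit;

    public LimitedBulkCacheConfig(int bulkMergeLimit) {
        this.bulkMergeLimit = bulkMergeLimit;
    }

    @Override
    public boolean isUseCache() {
        return true;
    }

    @Override
    public int getCacheBulkMergeLimit() {
        return bulkMergeLimit;
    }
}

With a limit of 0, no keys should reach the cache at all, which is what the second half of the test asserts.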

Example 4 with Cache

Use of io.druid.client.cache.Cache in project druid by druid-io.

From the class CachingQueryRunnerTest, method testCloseAndPopulate:

private void testCloseAndPopulate(List<Result> expectedRes, List<Result> expectedCacheRes, Query query, QueryToolChest toolchest) throws Exception {
    final AssertingClosable closable = new AssertingClosable();
    final Sequence resultSeq = Sequences.wrap(Sequences.simple(expectedRes), new SequenceWrapper() {

        @Override
        public void before() {
            Assert.assertFalse(closable.isClosed());
        }

        @Override
        public void after(boolean isDone, Throwable thrown) throws Exception {
            closable.close();
        }
    });
    final CountDownLatch cacheMustBePutOnce = new CountDownLatch(1);
    Cache cache = new Cache() {

        private final Map<NamedKey, byte[]> baseMap = new ConcurrentHashMap<>();

        @Override
        public byte[] get(NamedKey key) {
            return baseMap.get(key);
        }

        @Override
        public void put(NamedKey key, byte[] value) {
            baseMap.put(key, value);
            cacheMustBePutOnce.countDown();
        }

        @Override
        public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
            return null;
        }

        @Override
        public void close(String namespace) {
        }

        @Override
        public CacheStats getStats() {
            return null;
        }

        @Override
        public boolean isLocal() {
            return true;
        }

        @Override
        public void doMonitor(ServiceEmitter emitter) {
        }
    };
    String segmentIdentifier = "segment";
    SegmentDescriptor segmentDescriptor = new SegmentDescriptor(new Interval("2011/2012"), "version", 0);
    DefaultObjectMapper objectMapper = new DefaultObjectMapper();
    CachingQueryRunner runner = new CachingQueryRunner(segmentIdentifier, segmentDescriptor, objectMapper, cache, toolchest, new QueryRunner() {

        @Override
        public Sequence run(Query query, Map responseContext) {
            return resultSeq;
        }
    }, backgroundExecutorService, new CacheConfig() {

        @Override
        public boolean isPopulateCache() {
            return true;
        }

        @Override
        public boolean isUseCache() {
            return true;
        }
    });
    CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
    Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor, cacheStrategy.computeCacheKey(query));
    HashMap<String, Object> context = new HashMap<String, Object>();
    Sequence res = runner.run(query, context);
    // base sequence is not closed yet
    Assert.assertFalse("sequence must not be closed", closable.isClosed());
    Assert.assertNull("cache must be empty", cache.get(cacheKey));
    ArrayList results = Sequences.toList(res, new ArrayList());
    Assert.assertTrue(closable.isClosed());
    Assert.assertEquals(expectedRes.toString(), results.toString());
    // wait for background caching finish
    // wait at most 10 seconds to fail the test to avoid block overall tests
    Assert.assertTrue("cache must be populated", cacheMustBePutOnce.await(10, TimeUnit.SECONDS));
    byte[] cacheValue = cache.get(cacheKey);
    Assert.assertNotNull(cacheValue);
    Function<Object, Result> fn = cacheStrategy.pullFromCache();
    List<Result> cacheResults = Lists.newArrayList(Iterators.transform(objectMapper.readValues(objectMapper.getFactory().createParser(cacheValue), cacheStrategy.getCacheObjectClazz()), fn));
    Assert.assertEquals(expectedCacheRes.toString(), cacheResults.toString());
}
Also used : ServiceEmitter(com.metamx.emitter.service.ServiceEmitter) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) Query(io.druid.query.Query) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Result(io.druid.query.Result) SegmentDescriptor(io.druid.query.SegmentDescriptor) CacheConfig(io.druid.client.cache.CacheConfig) SequenceWrapper(io.druid.java.util.common.guava.SequenceWrapper) Sequence(io.druid.java.util.common.guava.Sequence) CountDownLatch(java.util.concurrent.CountDownLatch) IOException(java.io.IOException) QueryRunner(io.druid.query.QueryRunner) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CacheStrategy(io.druid.query.CacheStrategy) Cache(io.druid.client.cache.Cache) MapCache(io.druid.client.cache.MapCache) Interval(org.joda.time.Interval)
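
The ordering this test asserts (the cache is still empty right after run() and only populated once the result sequence has been drained) follows from the Sequences.withEffect contract that CachingQueryRunner relies on. Here is a standalone sketch of that contract with made-up names and no Druid caching involved:

import com.google.common.util.concurrent.MoreExecutors;
import io.druid.java.util.common.guava.Sequence;
import io.druid.java.util.common.guava.Sequences;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;

public class WithEffectSketch {

    public static void main(String[] args) {
        final AtomicBoolean effectRan = new AtomicBoolean(false);
        Sequence<Integer> seq = Sequences.withEffect(
                Sequences.simple(Arrays.asList(1, 2, 3)),
                new Runnable() {
                    @Override
                    public void run() {
                        // stands in for the cache population step
                        effectRan.set(true);
                    }
                },
                MoreExecutors.sameThreadExecutor()
        );
        // The effect is deferred until the sequence is consumed.
        System.out.println("before consumption: " + effectRan.get());
        Sequences.toList(seq, new ArrayList<Integer>());
        System.out.println("after consumption: " + effectRan.get());
    }
}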

Example 5 with Cache

Use of io.druid.client.cache.Cache in project druid by druid-io.

From the class CachingQueryRunnerTest, method testUseCache:

private void testUseCache(List<Result> expectedResults, Query query, QueryToolChest toolchest) throws Exception {
    DefaultObjectMapper objectMapper = new DefaultObjectMapper();
    String segmentIdentifier = "segment";
    SegmentDescriptor segmentDescriptor = new SegmentDescriptor(new Interval("2011/2012"), "version", 0);
    CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
    Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor, cacheStrategy.computeCacheKey(query));
    Cache cache = MapCache.create(1024 * 1024);
    CacheUtil.populate(cache, objectMapper, cacheKey, Iterables.transform(expectedResults, cacheStrategy.prepareForCache()));
    // the base runner returns an empty sequence since results should get pulled from cache
    CachingQueryRunner runner = new CachingQueryRunner(segmentIdentifier, segmentDescriptor, objectMapper, cache, toolchest, new QueryRunner() {

        @Override
        public Sequence run(Query query, Map responseContext) {
            return Sequences.empty();
        }
    }, backgroundExecutorService, new CacheConfig() {

        @Override
        public boolean isPopulateCache() {
            return true;
        }

        @Override
        public boolean isUseCache() {
            return true;
        }
    });
    HashMap<String, Object> context = new HashMap<String, Object>();
    List<Result> results = Sequences.toList(runner.run(query, context), new ArrayList());
    Assert.assertEquals(expectedResults.toString(), results.toString());
}
Also used : TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) Query(io.druid.query.Query) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Sequence(io.druid.java.util.common.guava.Sequence) QueryRunner(io.druid.query.QueryRunner) Result(io.druid.query.Result) SegmentDescriptor(io.druid.query.SegmentDescriptor) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CacheConfig(io.druid.client.cache.CacheConfig) CacheStrategy(io.druid.query.CacheStrategy) Interval(org.joda.time.Interval) Cache(io.druid.client.cache.Cache) MapCache(io.druid.client.cache.MapCache)
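
MapCache.create(1024 * 1024), as used above, provides a simple heap-backed cache that is convenient for tests. The sketch below exercises put and get against it directly; the Cache.NamedKey constructor arguments shown (a namespace string plus key bytes) are an assumption, since the test builds its key through CacheUtil.computeSegmentCacheKey instead.

import com.google.common.base.Charsets;
import io.druid.client.cache.Cache;
import io.druid.client.cache.MapCache;

import java.util.Arrays;

public class MapCacheSketch {

    public static void main(String[] args) {
        // a 1 MB heap-backed cache, as in the test above
        Cache cache = MapCache.create(1024 * 1024);

        // assumed NamedKey shape: namespace (typically the segment identifier) plus key bytes
        Cache.NamedKey key = new Cache.NamedKey("segment", "2011/2012_version_0".getBytes(Charsets.UTF_8));

        cache.put(key, new byte[]{1, 2, 3});
        System.out.println(Arrays.toString(cache.get(key)));
    }
}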

Aggregations

Cache (io.druid.client.cache.Cache): 5
Interval (org.joda.time.Interval): 4
ImmutableMap (com.google.common.collect.ImmutableMap): 3
MapCache (io.druid.client.cache.MapCache): 3
Sequence (io.druid.java.util.common.guava.Sequence): 3
CacheStrategy (io.druid.query.CacheStrategy): 3
Query (io.druid.query.Query): 3
QueryRunner (io.druid.query.QueryRunner): 3
Result (io.druid.query.Result): 3
SegmentDescriptor (io.druid.query.SegmentDescriptor): 3
TimeseriesQuery (io.druid.query.timeseries.TimeseriesQuery): 3
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
Map (java.util.Map): 3
TypeReference (com.fasterxml.jackson.core.type.TypeReference): 2
Function (com.google.common.base.Function): 2
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2
CacheConfig (io.druid.client.cache.CacheConfig): 2
QueryableDruidServer (io.druid.client.selector.QueryableDruidServer): 2