Example 6 with Function

Use of com.google.common.base.Function in the project sharding-jdbc by dangdangdotcom.

The class ShardingRuleMockBuilder, method build.

public ShardingRule build() {
    final DataSourceRule dataSourceRule = new DataSourceRule(ImmutableMap.of("db0", Mockito.mock(DataSource.class), "db1", Mockito.mock(DataSource.class)));
    Collection<TableRule> tableRules = Lists.newArrayList(Iterators.transform(autoIncrementColumnMap.keySet().iterator(), new Function<String, TableRule>() {

        @Override
        public TableRule apply(final String input) {
            TableRule.TableRuleBuilder builder = TableRule.builder(input).actualTables(Collections.singletonList(input)).dataSourceRule(dataSourceRule);
            for (String each : autoIncrementColumnMap.get(input)) {
                builder.autoIncrementColumns(each);
            }
            return builder.build();
        }
    }));
    if (tableRules.isEmpty()) {
        tableRules.add(new TableRule.TableRuleBuilder("mock").actualTables(Collections.singletonList("mock")).dataSourceRule(dataSourceRule).build());
    }
    return new ShardingRule.ShardingRuleBuilder().dataSourceRule(dataSourceRule).idGenerator(IncrementIdGenerator.class).tableRules(tableRules).databaseShardingStrategy(new DatabaseShardingStrategy(shardingColumns, new NoneDatabaseShardingAlgorithm())).build();
}
Also used : TableRule(com.dangdang.ddframe.rdb.sharding.api.rule.TableRule) Function(com.google.common.base.Function) DatabaseShardingStrategy(com.dangdang.ddframe.rdb.sharding.api.strategy.database.DatabaseShardingStrategy) DataSourceRule(com.dangdang.ddframe.rdb.sharding.api.rule.DataSourceRule) ShardingRule(com.dangdang.ddframe.rdb.sharding.api.rule.ShardingRule) NoneDatabaseShardingAlgorithm(com.dangdang.ddframe.rdb.sharding.api.strategy.database.NoneDatabaseShardingAlgorithm) IncrementIdGenerator(com.dangdang.ddframe.rdb.sharding.id.generator.fixture.IncrementIdGenerator)
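
Of the Guava pieces above, the interesting one is Iterators.transform: it wraps the key iterator and applies the anonymous Function lazily, while Lists.newArrayList materializes the transformed values into the TableRule collection. Below is a minimal, self-contained sketch of the same idiom; the table names and the suffix-stripping logic are purely illustrative and not taken from sharding-jdbc.

import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class IteratorTransformSketch {

    public static void main(String[] args) {
        List<String> actualTables = Arrays.asList("t_order_0", "t_order_1");

        // Iterators.transform applies the Function lazily; Lists.newArrayList then
        // materializes the transformed values, just as the builder above collects
        // its TableRule instances.
        List<String> logicTables = Lists.newArrayList(
                Iterators.transform(actualTables.iterator(), new Function<String, String>() {
                    @Override
                    public String apply(final String input) {
                        // strip the trailing shard suffix, e.g. "t_order_0" -> "t_order"
                        return input.substring(0, input.lastIndexOf('_'));
                    }
                }));

        System.out.println(logicTables); // [t_order, t_order]
    }
}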

Example 7 with Function

Use of com.google.common.base.Function in the project druid by druid-io.

The class ScanQueryEngine, method process.

public Sequence<ScanResultValue> process(final ScanQuery query, final Segment segment, final Map<String, Object> responseContext) {
    if (responseContext.get(ScanQueryRunnerFactory.CTX_COUNT) != null) {
        int count = (int) responseContext.get(ScanQueryRunnerFactory.CTX_COUNT);
        if (count >= query.getLimit()) {
            return Sequences.empty();
        }
    }
    final Long timeoutAt = (long) responseContext.get(ScanQueryRunnerFactory.CTX_TIMEOUT_AT);
    final long start = System.currentTimeMillis();
    final StorageAdapter adapter = segment.asStorageAdapter();
    if (adapter == null) {
        throw new ISE("Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped.");
    }
    List<String> allDims = Lists.newLinkedList(adapter.getAvailableDimensions());
    List<String> allMetrics = Lists.newLinkedList(adapter.getAvailableMetrics());
    final List<String> allColumns = Lists.newLinkedList();
    if (query.getColumns() != null && !query.getColumns().isEmpty()) {
        if (!query.getColumns().contains(ScanResultValue.timestampKey)) {
            allColumns.add(ScanResultValue.timestampKey);
        }
        allColumns.addAll(query.getColumns());
        allDims.retainAll(query.getColumns());
        allMetrics.retainAll(query.getColumns());
    } else {
        if (!allDims.contains(ScanResultValue.timestampKey)) {
            allColumns.add(ScanResultValue.timestampKey);
        }
        allColumns.addAll(allDims);
        allColumns.addAll(allMetrics);
    }
    final List<DimensionSpec> dims = DefaultDimensionSpec.toSpec(allDims);
    final List<String> metrics = allMetrics;
    final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals();
    Preconditions.checkArgument(intervals.size() == 1, "Can only handle a single interval, got[%s]", intervals);
    final String segmentId = segment.getIdentifier();
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimensionsFilter()));
    if (responseContext.get(ScanQueryRunnerFactory.CTX_COUNT) == null) {
        responseContext.put(ScanQueryRunnerFactory.CTX_COUNT, 0);
    }
    final int limit = query.getLimit() - (int) responseContext.get(ScanQueryRunnerFactory.CTX_COUNT);
    return Sequences.concat(Sequences.map(adapter.makeCursors(filter, intervals.get(0), VirtualColumns.EMPTY, Granularities.ALL, query.isDescending()), new Function<Cursor, Sequence<ScanResultValue>>() {

        @Override
        public Sequence<ScanResultValue> apply(final Cursor cursor) {
            return new BaseSequence<>(new BaseSequence.IteratorMaker<ScanResultValue, Iterator<ScanResultValue>>() {

                @Override
                public Iterator<ScanResultValue> make() {
                    final LongColumnSelector timestampColumnSelector = cursor.makeLongColumnSelector(Column.TIME_COLUMN_NAME);
                    final List<ColumnSelectorPlus<SelectQueryEngine.SelectColumnSelectorStrategy>> selectorPlusList = Arrays.asList(DimensionHandlerUtils.createColumnSelectorPluses(STRATEGY_FACTORY, Lists.newArrayList(dims), cursor));
                    final Map<String, ObjectColumnSelector> metSelectors = Maps.newHashMap();
                    for (String metric : metrics) {
                        final ObjectColumnSelector metricSelector = cursor.makeObjectColumnSelector(metric);
                        metSelectors.put(metric, metricSelector);
                    }
                    final int batchSize = query.getBatchSize();
                    return new Iterator<ScanResultValue>() {

                        private int offset = 0;

                        @Override
                        public boolean hasNext() {
                            return !cursor.isDone() && offset < limit;
                        }

                        @Override
                        public ScanResultValue next() {
                            if (System.currentTimeMillis() >= timeoutAt) {
                                throw new QueryInterruptedException(new TimeoutException());
                            }
                            int lastOffset = offset;
                            Object events = null;
                            String resultFormat = query.getResultFormat();
                            if (ScanQuery.RESULT_FORMAT_VALUE_VECTOR.equals(resultFormat)) {
                                throw new UnsupportedOperationException("valueVector is not supported now");
                            } else if (ScanQuery.RESULT_FORMAT_COMPACTED_LIST.equals(resultFormat)) {
                                events = rowsToCompactedList();
                            } else {
                                events = rowsToList();
                            }
                            responseContext.put(ScanQueryRunnerFactory.CTX_COUNT, (int) responseContext.get(ScanQueryRunnerFactory.CTX_COUNT) + (offset - lastOffset));
                            responseContext.put(ScanQueryRunnerFactory.CTX_TIMEOUT_AT, timeoutAt - (System.currentTimeMillis() - start));
                            return new ScanResultValue(segmentId, allColumns, events);
                        }

                        @Override
                        public void remove() {
                            throw new UnsupportedOperationException();
                        }

                        private Object rowsToCompactedList() {
                            return Lists.transform((List<Map<String, Object>>) rowsToList(), new Function<Map<String, Object>, Object>() {

                                @Override
                                public Object apply(Map<String, Object> input) {
                                    List eventValues = Lists.newArrayListWithExpectedSize(allColumns.size());
                                    for (String expectedColumn : allColumns) {
                                        eventValues.add(input.get(expectedColumn));
                                    }
                                    return eventValues;
                                }
                            });
                        }

                        private Object rowsToList() {
                            List<Map<String, Object>> events = Lists.newArrayListWithCapacity(batchSize);
                            for (int i = 0; !cursor.isDone() && i < batchSize && offset < limit; cursor.advance(), i++, offset++) {
                                final Map<String, Object> theEvent = SelectQueryEngine.singleEvent(ScanResultValue.timestampKey, timestampColumnSelector, selectorPlusList, metSelectors);
                                events.add(theEvent);
                            }
                            return events;
                        }

                        private Object rowsToValueVector() {
                            // only lists are supported for now; ValueVector or Arrow could be supported in the future
                            return rowsToList();
                        }
                    };
                }

                @Override
                public void cleanup(Iterator<ScanResultValue> iterFromMake) {
                }
            });
        }
    }));
}
Also used : DimensionSpec(io.druid.query.dimension.DimensionSpec) DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) StorageAdapter(io.druid.segment.StorageAdapter) Cursor(io.druid.segment.Cursor) Function(com.google.common.base.Function) Iterator(java.util.Iterator) ISE(io.druid.java.util.common.ISE) LongColumnSelector(io.druid.segment.LongColumnSelector) List(java.util.List) ObjectColumnSelector(io.druid.segment.ObjectColumnSelector) QueryInterruptedException(io.druid.query.QueryInterruptedException) TimeoutException(java.util.concurrent.TimeoutException) BaseSequence(io.druid.java.util.common.guava.BaseSequence) Filter(io.druid.query.filter.Filter) Map(java.util.Map) Interval(org.joda.time.Interval)
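
One self-contained piece of this engine is rowsToCompactedList(), which uses Lists.transform to project each row map onto the ordered column list. The sketch below isolates just that projection; the column names and rows are made up for illustration and no Druid types are involved.

import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class CompactedListSketch {

    public static void main(String[] args) {
        final List<String> allColumns = Arrays.asList("timestamp", "page", "count");
        List<Map<String, Object>> rows = Arrays.<Map<String, Object>>asList(
                ImmutableMap.<String, Object>of("timestamp", 1L, "page", "a", "count", 10),
                ImmutableMap.<String, Object>of("timestamp", 2L, "page", "b", "count", 20));

        // Lists.transform is a lazy view: each row map is flattened into a list of
        // values in column order only when the result is iterated, mirroring the
        // "compactedList" result format above.
        List<Object> compacted = Lists.transform(rows, new Function<Map<String, Object>, Object>() {
            @Override
            public Object apply(Map<String, Object> input) {
                List<Object> eventValues = Lists.newArrayListWithExpectedSize(allColumns.size());
                for (String column : allColumns) {
                    eventValues.add(input.get(column));
                }
                return eventValues;
            }
        });

        System.out.println(compacted); // [[1, a, 10], [2, b, 20]]
    }
}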

Example 8 with Function

Use of com.google.common.base.Function in the project druid by druid-io.

The class IncrementalIndexStorageAdapterTest, method testCursoringAndIndexUpdationInterleaving.

@Test
public void testCursoringAndIndexUpdationInterleaving() throws Exception {
    final IncrementalIndex index = indexCreator.createIndex();
    final long timestamp = System.currentTimeMillis();
    for (int i = 0; i < 2; i++) {
        index.add(new MapBasedInputRow(timestamp, Lists.newArrayList("billy"), ImmutableMap.<String, Object>of("billy", "v1" + i)));
    }
    final StorageAdapter sa = new IncrementalIndexStorageAdapter(index);
    Sequence<Cursor> cursors = sa.makeCursors(null, new Interval(timestamp - 60_000, timestamp + 60_000), VirtualColumns.EMPTY, Granularities.ALL, false);
    Sequences.toList(Sequences.map(cursors, new Function<Cursor, Object>() {

        @Nullable
        @Override
        public Object apply(Cursor cursor) {
            DimensionSelector dimSelector = cursor.makeDimensionSelector(new DefaultDimensionSpec("billy", "billy"));
            int cardinality = dimSelector.getValueCardinality();
            // the index gets more rows at this point, while the other thread is iterating over the cursor
            try {
                for (int i = 0; i < 1; i++) {
                    index.add(new MapBasedInputRow(timestamp, Lists.newArrayList("billy"), ImmutableMap.<String, Object>of("billy", "v2" + i)));
                }
            } catch (Exception ex) {
                throw new RuntimeException(ex);
            }
            // and then, cursoring continues in the other thread
            while (!cursor.isDone()) {
                IndexedInts row = dimSelector.getRow();
                for (int i : row) {
                    Assert.assertTrue(i < cardinality);
                }
                cursor.advance();
            }
            return null;
        }
    }), new ArrayList<>());
}
Also used : DimensionSelector(io.druid.segment.DimensionSelector) StorageAdapter(io.druid.segment.StorageAdapter) Cursor(io.druid.segment.Cursor) DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) IOException(java.io.IOException) Function(com.google.common.base.Function) IndexedInts(io.druid.segment.data.IndexedInts) MapBasedInputRow(io.druid.data.input.MapBasedInputRow) Interval(org.joda.time.Interval) Test(org.junit.Test)
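
All of these examples implement com.google.common.base.Function as an anonymous class. Because the interface has a single abstract method, apply, the same call sites also accept lambdas on Java 8 and later. The sketch below compares the two forms; the string-length function is purely illustrative.

import com.google.common.base.Function;
import com.google.common.collect.Lists;

import java.util.List;

public class LambdaEquivalentSketch {

    public static void main(String[] args) {
        List<String> names = Lists.newArrayList("billy", "sally");

        // Anonymous-class form, as used throughout the examples above.
        Function<String, Integer> lengthFn = new Function<String, Integer>() {
            @Override
            public Integer apply(String input) {
                return input.length();
            }
        };

        // Java 8+ lambda form: com.google.common.base.Function is a functional
        // interface, so the compiler accepts a lambda wherever one is expected.
        Function<String, Integer> lengthLambda = input -> input.length();

        System.out.println(Lists.transform(names, lengthFn));     // [5, 5]
        System.out.println(Lists.transform(names, lengthLambda)); // [5, 5]
    }
}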

Example 9 with Function

Use of com.google.common.base.Function in the project druid by druid-io.

The class CachingClusteredClient, method run.

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);
    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();
    final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();
    final boolean useCache = CacheUtil.useCacheOnBrokers(query, strategy, cacheConfig);
    final boolean populateCache = CacheUtil.populateCacheOnBrokers(query, strategy, cacheConfig);
    final boolean isBySegment = BaseQuery.getContextBySegment(query, false);
    final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();
    final int priority = BaseQuery.getContextPriority(query, 0);
    contextBuilder.put("priority", priority);
    if (populateCache) {
        // prevent down-stream nodes from caching results as well if we are populating the cache
        contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
        contextBuilder.put("bySegment", true);
    }
    TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
        return Sequences.empty();
    }
    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();
    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();
    // Note that enabling this leads to putting uncovered intervals information in the response headers
    // and might blow up in some cases https://github.com/druid-io/druid/issues/2108
    int uncoveredIntervalsLimit = BaseQuery.getContextUncoveredIntervalsLimit(query, 0);
    if (uncoveredIntervalsLimit > 0) {
        List<Interval> uncoveredIntervals = Lists.newArrayListWithCapacity(uncoveredIntervalsLimit);
        boolean uncoveredIntervalsOverflowed = false;
        for (Interval interval : query.getIntervals()) {
            Iterable<TimelineObjectHolder<String, ServerSelector>> lookup = timeline.lookup(interval);
            long startMillis = interval.getStartMillis();
            long endMillis = interval.getEndMillis();
            for (TimelineObjectHolder<String, ServerSelector> holder : lookup) {
                Interval holderInterval = holder.getInterval();
                long intervalStart = holderInterval.getStartMillis();
                if (!uncoveredIntervalsOverflowed && startMillis != intervalStart) {
                    if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                        uncoveredIntervals.add(new Interval(startMillis, intervalStart));
                    } else {
                        uncoveredIntervalsOverflowed = true;
                    }
                }
                startMillis = holderInterval.getEndMillis();
                serversLookup.add(holder);
            }
            if (!uncoveredIntervalsOverflowed && startMillis < endMillis) {
                if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                    uncoveredIntervals.add(new Interval(startMillis, endMillis));
                } else {
                    uncoveredIntervalsOverflowed = true;
                }
            }
        }
        if (!uncoveredIntervals.isEmpty()) {
            // This returns intervals for which NO segment is present.
            // Which is not necessarily an indication that the data doesn't exist or is
            // incomplete. The data could exist and just not be loaded yet.  In either
            // case, though, this query will not include any data from the identified intervals.
            responseContext.put("uncoveredIntervals", uncoveredIntervals);
            responseContext.put("uncoveredIntervalsOverflowed", uncoveredIntervalsOverflowed);
        }
    } else {
        for (Interval interval : query.getIntervals()) {
            Iterables.addAll(serversLookup, timeline.lookup(interval));
        }
    }
    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup = toolChest.filterSegments(query, serversLookup);
    Map<String, Optional<RangeSet<String>>> dimensionRangeCache = Maps.newHashMap();
    // Filter unneeded chunks based on partition dimension
    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
        final Set<PartitionChunk<ServerSelector>> filteredChunks = DimFilterUtils.filterShards(query.getFilter(), holder.getObject(), new Function<PartitionChunk<ServerSelector>, ShardSpec>() {

            @Override
            public ShardSpec apply(PartitionChunk<ServerSelector> input) {
                return input.getObject().getSegment().getShardSpec();
            }
        }, dimensionRangeCache);
        for (PartitionChunk<ServerSelector> chunk : filteredChunks) {
            ServerSelector selector = chunk.getObject();
            final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
            segments.add(Pair.of(selector, descriptor));
        }
    }
    final byte[] queryCacheKey;
    // (populateCache || useCache) implies strategy != null; explicit bySegment queries are never cached
    if ((populateCache || useCache) && !isBySegment) {
        queryCacheKey = strategy.computeCacheKey(query);
    } else {
        queryCacheKey = null;
    }
    if (query.getContext().get(QueryResource.HDR_IF_NONE_MATCH) != null) {
        String prevEtag = (String) query.getContext().get(QueryResource.HDR_IF_NONE_MATCH);
        // compute the current Etag
        Hasher hasher = Hashing.sha1().newHasher();
        boolean hasOnlyHistoricalSegments = true;
        for (Pair<ServerSelector, SegmentDescriptor> p : segments) {
            if (!p.lhs.pick().getServer().isAssignable()) {
                hasOnlyHistoricalSegments = false;
                break;
            }
            hasher.putString(p.lhs.getSegment().getIdentifier(), Charsets.UTF_8);
        }
        if (hasOnlyHistoricalSegments) {
            hasher.putBytes(queryCacheKey == null ? strategy.computeCacheKey(query) : queryCacheKey);
            String currEtag = Base64.encodeBase64String(hasher.hash().asBytes());
            responseContext.put(QueryResource.HDR_ETAG, currEtag);
            if (prevEtag.equals(currEtag)) {
                return Sequences.empty();
            }
        }
    }
    if (queryCacheKey != null) {
        // cacheKeys map must preserve segment ordering, in order for shards to always be combined in the same order
        Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newLinkedHashMap();
        for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
            final Cache.NamedKey segmentCacheKey = CacheUtil.computeSegmentCacheKey(segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
            cacheKeys.put(segment, segmentCacheKey);
        }
        // Pull cached segments from cache and remove from set of segments to query
        final Map<Cache.NamedKey, byte[]> cachedValues;
        if (useCache) {
            cachedValues = cache.getBulk(Iterables.limit(cacheKeys.values(), cacheConfig.getCacheBulkMergeLimit()));
        } else {
            cachedValues = ImmutableMap.of();
        }
        for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
            Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
            Cache.NamedKey segmentCacheKey = entry.getValue();
            final Interval segmentQueryInterval = segment.rhs.getInterval();
            final byte[] cachedValue = cachedValues.get(segmentCacheKey);
            if (cachedValue != null) {
                // remove cached segment from set of segments to query
                segments.remove(segment);
                cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
            } else if (populateCache) {
                // otherwise, if populating cache, add segment to list of segments to cache
                final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
                cachePopulatorMap.put(String.format("%s_%s", segmentIdentifier, segmentQueryInterval), new CachePopulator(cache, objectMapper, segmentCacheKey));
            }
        }
    }
    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final QueryableDruidServer queryableDruidServer = segment.lhs.pick();
        if (queryableDruidServer == null) {
            log.makeAlert("No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!", segment.rhs, query.getDataSource()).emit();
        } else {
            final DruidServer server = queryableDruidServer.getServer();
            List<SegmentDescriptor> descriptors = serverSegments.get(server);
            if (descriptors == null) {
                descriptors = Lists.newArrayList();
                serverSegments.put(server, descriptors);
            }
            descriptors.add(segment.rhs);
        }
    }
    return new LazySequence<>(new Supplier<Sequence<T>>() {

        @Override
        public Sequence<T> get() {
            ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
            addSequencesFromCache(sequencesByInterval);
            addSequencesFromServer(sequencesByInterval);
            return mergeCachedAndUncachedSequences(query, sequencesByInterval);
        }

        private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
            if (strategy == null) {
                return;
            }
            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
                final byte[] cachedResult = cachedResultPair.rhs;
                Sequence<Object> cachedSequence = new BaseSequence<>(new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {

                    @Override
                    public Iterator<Object> make() {
                        try {
                            if (cachedResult.length == 0) {
                                return Iterators.emptyIterator();
                            }
                            return objectMapper.readValues(objectMapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                        } catch (IOException e) {
                            throw Throwables.propagate(e);
                        }
                    }

                    @Override
                    public void cleanup(Iterator<Object> iterFromMake) {
                    }
                });
                listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
            }
        }

        private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
            listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());
            final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());
            // The data is handled as a Future and parsed in the long Sequence chain that builds resultSeqToAdd.
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry : serverSegments.entrySet()) {
                final DruidServer server = entry.getKey();
                final List<SegmentDescriptor> descriptors = entry.getValue();
                final QueryRunner clientQueryable = serverView.getQueryRunner(server);
                if (clientQueryable == null) {
                    log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
                    continue;
                }
                final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors);
                final Sequence<T> resultSeqToAdd;
                if (!server.isAssignable() || !populateCache || isBySegment) {
                    // Direct server queryable
                    if (!isBySegment) {
                        resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec), responseContext);
                    } else {
                        // bySegment queries need to be de-serialized, see DirectDruidClient.run()
                        @SuppressWarnings("unchecked") final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery = (Query<Result<BySegmentResultValueClass<T>>>) ((Query) query);
                        @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence = clientQueryable.run(bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                        resultSeqToAdd = (Sequence) Sequences.map(resultSequence, new Function<Result<BySegmentResultValueClass<T>>, Result<BySegmentResultValueClass<T>>>() {

                            @Override
                            public Result<BySegmentResultValueClass<T>> apply(Result<BySegmentResultValueClass<T>> input) {
                                final BySegmentResultValueClass<T> bySegmentValue = input.getValue();
                                return new Result<>(input.getTimestamp(), new BySegmentResultValueClass<T>(Lists.transform(bySegmentValue.getResults(), toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())), bySegmentValue.getSegmentId(), bySegmentValue.getInterval()));
                            }
                        });
                    }
                } else {
                    // Requires some manipulation on broker side
                    @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence = clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                    resultSeqToAdd = new MergeSequence(query.getResultOrdering(), Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(runningSequence, new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {

                        private final Function<T, Object> cacheFn = strategy.prepareForCache();

                        // Actually do something with the results
                        @Override
                        public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
                            final BySegmentResultValueClass<T> value = input.getValue();
                            final CachePopulator cachePopulator = cachePopulatorMap.get(String.format("%s_%s", value.getSegmentId(), value.getInterval()));
                            final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();
                            return Sequences.<T>withEffect(Sequences.<T, T>map(Sequences.<T, T>map(Sequences.<T>simple(value.getResults()), new Function<T, T>() {

                                @Override
                                public T apply(final T input) {
                                    if (cachePopulator != null) {
                                        // only compute cache data if populating cache
                                        cacheFutures.add(backgroundExecutorService.submit(new Callable<Object>() {

                                            @Override
                                            public Object call() {
                                                return cacheFn.apply(input);
                                            }
                                        }));
                                    }
                                    return input;
                                }
                            }), toolChest.makePreComputeManipulatorFn(
                                // This casting is sub-optimal, but hasn't caused any major problems yet...
                                (Query) rewrittenQuery, MetricManipulatorFns.deserializing())), new Runnable() {

                                @Override
                                public void run() {
                                    if (cachePopulator != null) {
                                        Futures.addCallback(Futures.allAsList(cacheFutures), new FutureCallback<List<Object>>() {

                                            @Override
                                            public void onSuccess(List<Object> cacheData) {
                                                cachePopulator.populate(cacheData);
                                                // Help out GC by making sure all references are gone
                                                cacheFutures.clear();
                                            }

                                            @Override
                                            public void onFailure(Throwable throwable) {
                                                log.error(throwable, "Background caching failed");
                                            }
                                        }, backgroundExecutorService);
                                    }
                                }
                            }, MoreExecutors.sameThreadExecutor()); // end of Sequences.withEffect
                        }
                    }));
                }
                listOfSequences.add(resultSeqToAdd);
            }
        }
    });
}
Also used : BaseQuery(io.druid.query.BaseQuery) Query(io.druid.query.Query) ArrayList(java.util.ArrayList) ShardSpec(io.druid.timeline.partition.ShardSpec) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) Result(io.druid.query.Result) ServerSelector(io.druid.client.selector.ServerSelector) List(java.util.List) ArrayList(java.util.ArrayList) FutureCallback(com.google.common.util.concurrent.FutureCallback) Optional(com.google.common.base.Optional) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass) LazySequence(io.druid.java.util.common.guava.LazySequence) BaseSequence(io.druid.java.util.common.guava.BaseSequence) Sequence(io.druid.java.util.common.guava.Sequence) MergeSequence(io.druid.java.util.common.guava.MergeSequence) ImmutableMap(com.google.common.collect.ImmutableMap) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) Hasher(com.google.common.hash.Hasher) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Interval(org.joda.time.Interval) Cache(io.druid.client.cache.Cache) MultipleSpecificSegmentSpec(io.druid.query.spec.MultipleSpecificSegmentSpec) Function(com.google.common.base.Function) MergeSequence(io.druid.java.util.common.guava.MergeSequence) SegmentDescriptor(io.druid.query.SegmentDescriptor) Iterator(java.util.Iterator) PartitionChunk(io.druid.timeline.partition.PartitionChunk) TypeReference(com.fasterxml.jackson.core.type.TypeReference) Pair(io.druid.java.util.common.Pair) IOException(java.io.IOException) QueryRunner(io.druid.query.QueryRunner) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) LazySequence(io.druid.java.util.common.guava.LazySequence)
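
The background cache population at the end of addSequencesFromServer combines several Guava concurrency pieces: each result's cacheable form is computed on a background ListeningExecutorService, the resulting ListenableFutures are queued, and Futures.allAsList with a FutureCallback populates the cache once everything has completed. The sketch below strips that pattern down to its essentials; cacheFn is a stand-in for strategy.prepareForCache(), and the callback runs on MoreExecutors.directExecutor() (rather than the background executor used above) only so the example can shut its pool down cleanly.

import com.google.common.base.Function;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.Arrays;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;

public class BackgroundCachePopulationSketch {

    public static void main(String[] args) throws Exception {
        final ListeningExecutorService backgroundExecutorService =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        // Stand-in for strategy.prepareForCache(): turns a result into its cacheable form.
        final Function<String, Object> cacheFn = new Function<String, Object>() {
            @Override
            public Object apply(String input) {
                return input.toUpperCase();
            }
        };

        // As each result flows by, its cache representation is computed off-thread
        // and the future is collected so one callback can populate the cache at the end.
        final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();
        for (final String result : Arrays.asList("row1", "row2", "row3")) {
            cacheFutures.add(backgroundExecutorService.submit(new Callable<Object>() {
                @Override
                public Object call() {
                    return cacheFn.apply(result);
                }
            }));
        }

        ListenableFuture<List<Object>> allDone = Futures.allAsList(cacheFutures);
        Futures.addCallback(allDone, new FutureCallback<List<Object>>() {
            @Override
            public void onSuccess(List<Object> cacheData) {
                // this is where CachePopulator.populate(cacheData) would run
                System.out.println("would cache " + cacheData);
                cacheFutures.clear();
            }

            @Override
            public void onFailure(Throwable throwable) {
                System.err.println("background caching failed: " + throwable);
            }
        }, MoreExecutors.directExecutor());

        // wait so the sketch finishes its work before the pool is shut down
        allDone.get();
        backgroundExecutorService.shutdown();
    }
}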

Example 10 with Function

Use of com.google.common.base.Function in the project druid by druid-io.

The class CachingQueryRunner, method run.

@Override
public Sequence<T> run(Query<T> query, Map<String, Object> responseContext) {
    final CacheStrategy strategy = toolChest.getCacheStrategy(query);
    final boolean populateCache = CacheUtil.populateCacheOnDataNodes(query, strategy, cacheConfig);
    final boolean useCache = CacheUtil.useCacheOnDataNodes(query, strategy, cacheConfig);
    final Cache.NamedKey key;
    if (strategy != null && (useCache || populateCache)) {
        key = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor, strategy.computeCacheKey(query));
    } else {
        key = null;
    }
    if (useCache) {
        final Function cacheFn = strategy.pullFromCache();
        final byte[] cachedResult = cache.get(key);
        if (cachedResult != null) {
            final TypeReference cacheObjectClazz = strategy.getCacheObjectClazz();
            return Sequences.map(new BaseSequence<>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {

                @Override
                public Iterator<T> make() {
                    try {
                        if (cachedResult.length == 0) {
                            return Iterators.emptyIterator();
                        }
                        return mapper.readValues(mapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                    } catch (IOException e) {
                        throw Throwables.propagate(e);
                    }
                }

                @Override
                public void cleanup(Iterator<T> iterFromMake) {
                }
            }), cacheFn);
        }
    }
    final Collection<ListenableFuture<?>> cacheFutures = Collections.synchronizedList(Lists.<ListenableFuture<?>>newLinkedList());
    if (populateCache) {
        final Function cacheFn = strategy.prepareForCache();
        return Sequences.withEffect(Sequences.map(base.run(query, responseContext), new Function<T, T>() {

            @Override
            public T apply(final T input) {
                final SettableFuture<Object> future = SettableFuture.create();
                cacheFutures.add(future);
                backgroundExecutorService.submit(new Runnable() {

                    @Override
                    public void run() {
                        try {
                            future.set(cacheFn.apply(input));
                        } catch (Exception e) {
                            // if there is an exception, call setException to abort the caching process
                            future.setException(e);
                        }
                    }
                });
                return input;
            }
        }), new Runnable() {

            @Override
            public void run() {
                try {
                    CacheUtil.populate(cache, mapper, key, Futures.allAsList(cacheFutures).get());
                } catch (Exception e) {
                    log.error(e, "Error while getting future for cache task");
                    throw Throwables.propagate(e);
                }
            }
        }, backgroundExecutorService);
    } else {
        return base.run(query, responseContext);
    }
}
Also used : IOException(java.io.IOException) IOException(java.io.IOException) Function(com.google.common.base.Function) Iterator(java.util.Iterator) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) TypeReference(com.fasterxml.jackson.core.type.TypeReference) CacheStrategy(io.druid.query.CacheStrategy) Cache(io.druid.client.cache.Cache)
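
The useCache branch is the mirror image of the populateCache branch: a cached byte[] is parsed back with Jackson's readValues, and each deserialized object is run through the Function returned by strategy.pullFromCache(). The sketch below isolates that read path; plain integers stand in for the real cache object class, and the generator at the top exists only to fabricate a cache entry of the same shape (a root-level sequence of JSON values).

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;

import java.io.ByteArrayOutputStream;
import java.util.Iterator;
import java.util.List;

public class PullFromCacheSketch {

    public static void main(String[] args) throws Exception {
        final ObjectMapper mapper = new ObjectMapper();

        // Fabricate a "cached" entry: a root-level sequence of JSON values.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        JsonGenerator gen = mapper.getFactory().createGenerator(baos);
        for (int value : new int[]{1, 2, 3}) {
            gen.writeNumber(value);
        }
        gen.close();
        final byte[] cachedResult = baos.toByteArray();

        // Empty cache entries mean "no rows", mirroring the length check in the runner above.
        Iterator<Integer> fromCache = cachedResult.length == 0
                ? ImmutableList.<Integer>of().iterator()
                : mapper.readValues(mapper.getFactory().createParser(cachedResult), Integer.class);

        // pullFromCache() plays this role: turning each raw cached value back into a result object.
        Function<Integer, String> pullFromCacheFn = new Function<Integer, String>() {
            @Override
            public String apply(Integer input) {
                return "row-" + input;
            }
        };

        List<String> results = ImmutableList.copyOf(Iterators.transform(fromCache, pullFromCacheFn));
        System.out.println(results); // [row-1, row-2, row-3]
    }
}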

Aggregations

Function (com.google.common.base.Function): 315
Nullable (javax.annotation.Nullable): 57
ArrayList (java.util.ArrayList): 51
List (java.util.List): 49
Test (org.junit.Test): 49
IOException (java.io.IOException): 48
Map (java.util.Map): 45
File (java.io.File): 29
HashMap (java.util.HashMap): 29
ImmutableList (com.google.common.collect.ImmutableList): 23
ImmutableMap (com.google.common.collect.ImmutableMap): 19
DateTime (org.joda.time.DateTime): 19
Optional (com.google.common.base.Optional): 18
ISE (io.druid.java.util.common.ISE): 14
Iterator (java.util.Iterator): 14
ImmutableSet (com.google.common.collect.ImmutableSet): 13
Result (io.druid.query.Result): 13
Feature (org.opengis.feature.Feature): 13
BuildTarget (com.facebook.buck.model.BuildTarget): 12
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 12