Example 6 with TypeReference

Use of com.fasterxml.jackson.core.type.TypeReference in the project Java-Mandrill-Wrapper by cribbstechnologies.

From the class MandrillRESTRequestTest, method testUrlTimeResponseConversion:

@Test
public void testUrlTimeResponseConversion() throws IOException {
    this.initRequestWithActualMapper();
    StringWriter sw = new StringWriter();
    IOUtils.copy(this.getClass().getClassLoader().getResourceAsStream("urls/urlTimeResponse.txt"), sw);
    TypeReference<List<TimeUrlResponse>> urlTimeReference = new TypeReference<List<TimeUrlResponse>>() {
    };
    BaseMandrillAnonymousListResponse<TimeUrlResponse> response = (BaseMandrillAnonymousListResponse<TimeUrlResponse>) this.request.convertAnonymousListResponseData(sw.toString(), UrlTimeResponse.class, urlTimeReference);
    assertEquals(2, response.getList().size());
    TimeUrlResponse timeUrlResponse = response.getList().get(0);
    assertEquals("example time1", timeUrlResponse.getTime());
    assertEquals(2, timeUrlResponse.getSent());
    assertEquals(3, timeUrlResponse.getClicks());
    assertEquals(4, timeUrlResponse.getUnique_clicks());
    timeUrlResponse = response.getList().get(1);
    assertEquals("example time11", timeUrlResponse.getTime());
    assertEquals(12, timeUrlResponse.getSent());
    assertEquals(13, timeUrlResponse.getClicks());
    assertEquals(14, timeUrlResponse.getUnique_clicks());
}
Also used : BaseMandrillAnonymousListResponse(com.cribbstechnologies.clients.mandrill.model.response.BaseMandrillAnonymousListResponse) StringWriter(java.io.StringWriter) TimeUrlResponse(com.cribbstechnologies.clients.mandrill.model.response.urls.TimeUrlResponse) TypeReference(com.fasterxml.jackson.core.type.TypeReference) UrlTimeResponse(com.cribbstechnologies.clients.mandrill.model.response.urls.UrlTimeResponse) Test(org.junit.Test)
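
For readers who just want the TypeReference pattern this test exercises, here is a minimal, self-contained sketch of the same idea: binding a JSON array to List<TimeUrlResponse> with plain Jackson. The simplified POJO and sample JSON are invented for illustration; the real test goes through the wrapper's MandrillRESTRequest and its actual response classes.

import java.util.List;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TimeUrlResponseSketch {

    // Hypothetical stand-in for the wrapper's TimeUrlResponse, with just the fields the test asserts.
    public static class TimeUrlResponse {
        public String time;
        public int sent;
        public int clicks;
        public int unique_clicks;
    }

    public static void main(String[] args) throws Exception {
        String json = "[{\"time\":\"example time1\",\"sent\":2,\"clicks\":3,\"unique_clicks\":4}]";
        ObjectMapper mapper = new ObjectMapper();
        // The anonymous TypeReference subclass captures List<TimeUrlResponse>,
        // so Jackson can recover the element type despite erasure.
        List<TimeUrlResponse> parsed = mapper.readValue(json, new TypeReference<List<TimeUrlResponse>>() {});
        System.out.println(parsed.get(0).time + " clicks=" + parsed.get(0).clicks);
    }
}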

Example 7 with TypeReference

Use of com.fasterxml.jackson.core.type.TypeReference in the project druid by druid-io.

From the class WhiteListBasedConverter, method readMap:

private ImmutableSortedMap<String, ImmutableSet<String>> readMap(final String mapPath) {
    String fileContent;
    String actualPath = mapPath;
    try {
        if (Strings.isNullOrEmpty(mapPath)) {
            URL resource = this.getClass().getClassLoader().getResource("defaultWhiteListMap.json");
            actualPath = resource.getFile();
            LOGGER.info("using default whiteList map located at [%s]", actualPath);
            fileContent = Resources.toString(resource, Charset.defaultCharset());
        } else {
            fileContent = Files.asCharSource(new File(mapPath), Charset.forName("UTF-8")).read();
        }
        return mapper.reader(new TypeReference<ImmutableSortedMap<String, ImmutableSet<String>>>() {
        }).readValue(fileContent);
    } catch (IOException e) {
        throw new ISE(e, "Got an exception while parsing file [%s]", actualPath);
    }
}
Also used : ImmutableSet(com.google.common.collect.ImmutableSet) ISE(io.druid.java.util.common.ISE) TypeReference(com.fasterxml.jackson.core.type.TypeReference) IOException(java.io.IOException) File(java.io.File) URL(java.net.URL)
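
A stripped-down version of the same TypeReference trick, assuming plain JDK collections instead of the Guava types used above (binding to ImmutableSortedMap/ImmutableSet as in the Druid code additionally requires registering the jackson-datatype-guava module). The sample JSON is invented for illustration.

import java.util.Map;
import java.util.Set;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class WhiteListMapSketch {

    public static void main(String[] args) throws Exception {
        String fileContent = "{\"jvm\": [\"jvm/gc\", \"jvm/mem\"], \"query\": [\"query/time\"]}";
        ObjectMapper mapper = new ObjectMapper();
        // The nested generic signature survives type erasure because the
        // anonymous TypeReference subclass records it.
        Map<String, Set<String>> whiteList = mapper.readValue(
                fileContent, new TypeReference<Map<String, Set<String>>>() {});
        System.out.println(whiteList.get("jvm")); // [jvm/gc, jvm/mem]
    }
}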

Example 8 with TypeReference

Use of com.fasterxml.jackson.core.type.TypeReference in the project druid by druid-io.

From the class CachingClusteredClient, method run:

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);
    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();
    final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();
    final boolean useCache = CacheUtil.useCacheOnBrokers(query, strategy, cacheConfig);
    final boolean populateCache = CacheUtil.populateCacheOnBrokers(query, strategy, cacheConfig);
    final boolean isBySegment = BaseQuery.getContextBySegment(query, false);
    final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();
    final int priority = BaseQuery.getContextPriority(query, 0);
    contextBuilder.put("priority", priority);
    if (populateCache) {
        // prevent down-stream nodes from caching results as well if we are populating the cache
        contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
        contextBuilder.put("bySegment", true);
    }
    TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
        return Sequences.empty();
    }
    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();
    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();
    // Note that enabling this leads to putting uncovered intervals information in the response headers
    // and might blow up in some cases https://github.com/druid-io/druid/issues/2108
    int uncoveredIntervalsLimit = BaseQuery.getContextUncoveredIntervalsLimit(query, 0);
    if (uncoveredIntervalsLimit > 0) {
        List<Interval> uncoveredIntervals = Lists.newArrayListWithCapacity(uncoveredIntervalsLimit);
        boolean uncoveredIntervalsOverflowed = false;
        for (Interval interval : query.getIntervals()) {
            Iterable<TimelineObjectHolder<String, ServerSelector>> lookup = timeline.lookup(interval);
            long startMillis = interval.getStartMillis();
            long endMillis = interval.getEndMillis();
            for (TimelineObjectHolder<String, ServerSelector> holder : lookup) {
                Interval holderInterval = holder.getInterval();
                long intervalStart = holderInterval.getStartMillis();
                if (!uncoveredIntervalsOverflowed && startMillis != intervalStart) {
                    if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                        uncoveredIntervals.add(new Interval(startMillis, intervalStart));
                    } else {
                        uncoveredIntervalsOverflowed = true;
                    }
                }
                startMillis = holderInterval.getEndMillis();
                serversLookup.add(holder);
            }
            if (!uncoveredIntervalsOverflowed && startMillis < endMillis) {
                if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                    uncoveredIntervals.add(new Interval(startMillis, endMillis));
                } else {
                    uncoveredIntervalsOverflowed = true;
                }
            }
        }
        if (!uncoveredIntervals.isEmpty()) {
            // This returns intervals for which NO segment is present.
            // Which is not necessarily an indication that the data doesn't exist or is
            // incomplete. The data could exist and just not be loaded yet.  In either
            // case, though, this query will not include any data from the identified intervals.
            responseContext.put("uncoveredIntervals", uncoveredIntervals);
            responseContext.put("uncoveredIntervalsOverflowed", uncoveredIntervalsOverflowed);
        }
    } else {
        for (Interval interval : query.getIntervals()) {
            Iterables.addAll(serversLookup, timeline.lookup(interval));
        }
    }
    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup = toolChest.filterSegments(query, serversLookup);
    Map<String, Optional<RangeSet<String>>> dimensionRangeCache = Maps.newHashMap();
    // Filter unneeded chunks based on partition dimension
    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
        final Set<PartitionChunk<ServerSelector>> filteredChunks = DimFilterUtils.filterShards(query.getFilter(), holder.getObject(), new Function<PartitionChunk<ServerSelector>, ShardSpec>() {

            @Override
            public ShardSpec apply(PartitionChunk<ServerSelector> input) {
                return input.getObject().getSegment().getShardSpec();
            }
        }, dimensionRangeCache);
        for (PartitionChunk<ServerSelector> chunk : filteredChunks) {
            ServerSelector selector = chunk.getObject();
            final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
            segments.add(Pair.of(selector, descriptor));
        }
    }
    final byte[] queryCacheKey;
    // (populateCache || useCache) implies strategy != null; explicit bySegment queries are never cached
    if ((populateCache || useCache) && !isBySegment) {
        queryCacheKey = strategy.computeCacheKey(query);
    } else {
        queryCacheKey = null;
    }
    if (query.getContext().get(QueryResource.HDR_IF_NONE_MATCH) != null) {
        String prevEtag = (String) query.getContext().get(QueryResource.HDR_IF_NONE_MATCH);
        //compute current Etag
        Hasher hasher = Hashing.sha1().newHasher();
        boolean hasOnlyHistoricalSegments = true;
        for (Pair<ServerSelector, SegmentDescriptor> p : segments) {
            if (!p.lhs.pick().getServer().isAssignable()) {
                hasOnlyHistoricalSegments = false;
                break;
            }
            hasher.putString(p.lhs.getSegment().getIdentifier(), Charsets.UTF_8);
        }
        if (hasOnlyHistoricalSegments) {
            hasher.putBytes(queryCacheKey == null ? strategy.computeCacheKey(query) : queryCacheKey);
            String currEtag = Base64.encodeBase64String(hasher.hash().asBytes());
            responseContext.put(QueryResource.HDR_ETAG, currEtag);
            if (prevEtag.equals(currEtag)) {
                return Sequences.empty();
            }
        }
    }
    if (queryCacheKey != null) {
        // cacheKeys map must preserve segment ordering, so that shards are always combined in the same order
        Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newLinkedHashMap();
        for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
            final Cache.NamedKey segmentCacheKey = CacheUtil.computeSegmentCacheKey(segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
            cacheKeys.put(segment, segmentCacheKey);
        }
        // Pull cached segments from cache and remove from set of segments to query
        final Map<Cache.NamedKey, byte[]> cachedValues;
        if (useCache) {
            cachedValues = cache.getBulk(Iterables.limit(cacheKeys.values(), cacheConfig.getCacheBulkMergeLimit()));
        } else {
            cachedValues = ImmutableMap.of();
        }
        for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
            Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
            Cache.NamedKey segmentCacheKey = entry.getValue();
            final Interval segmentQueryInterval = segment.rhs.getInterval();
            final byte[] cachedValue = cachedValues.get(segmentCacheKey);
            if (cachedValue != null) {
                // remove cached segment from set of segments to query
                segments.remove(segment);
                cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
            } else if (populateCache) {
                // otherwise, if populating cache, add segment to list of segments to cache
                final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
                cachePopulatorMap.put(String.format("%s_%s", segmentIdentifier, segmentQueryInterval), new CachePopulator(cache, objectMapper, segmentCacheKey));
            }
        }
    }
    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final QueryableDruidServer queryableDruidServer = segment.lhs.pick();
        if (queryableDruidServer == null) {
            log.makeAlert("No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!", segment.rhs, query.getDataSource()).emit();
        } else {
            final DruidServer server = queryableDruidServer.getServer();
            List<SegmentDescriptor> descriptors = serverSegments.get(server);
            if (descriptors == null) {
                descriptors = Lists.newArrayList();
                serverSegments.put(server, descriptors);
            }
            descriptors.add(segment.rhs);
        }
    }
    return new LazySequence<>(new Supplier<Sequence<T>>() {

        @Override
        public Sequence<T> get() {
            ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
            addSequencesFromCache(sequencesByInterval);
            addSequencesFromServer(sequencesByInterval);
            return mergeCachedAndUncachedSequences(query, sequencesByInterval);
        }

        private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
            if (strategy == null) {
                return;
            }
            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
                final byte[] cachedResult = cachedResultPair.rhs;
                Sequence<Object> cachedSequence = new BaseSequence<>(new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {

                    @Override
                    public Iterator<Object> make() {
                        try {
                            if (cachedResult.length == 0) {
                                return Iterators.emptyIterator();
                            }
                            return objectMapper.readValues(objectMapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                        } catch (IOException e) {
                            throw Throwables.propagate(e);
                        }
                    }

                    @Override
                    public void cleanup(Iterator<Object> iterFromMake) {
                    }
                });
                listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
            }
        }

        private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
            listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());
            final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());
            // The data gets handled as a Future and parsed in the long Sequence chain in the resultSeqToAdd setter.
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry : serverSegments.entrySet()) {
                final DruidServer server = entry.getKey();
                final List<SegmentDescriptor> descriptors = entry.getValue();
                final QueryRunner clientQueryable = serverView.getQueryRunner(server);
                if (clientQueryable == null) {
                    log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
                    continue;
                }
                final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors);
                final Sequence<T> resultSeqToAdd;
                if (!server.isAssignable() || !populateCache || isBySegment) {
                    // Direct server queryable
                    if (!isBySegment) {
                        resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec), responseContext);
                    } else {
                        // bySegment queries need to be de-serialized, see DirectDruidClient.run()
                        @SuppressWarnings("unchecked") final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery = (Query<Result<BySegmentResultValueClass<T>>>) ((Query) query);
                        @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence = clientQueryable.run(bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                        resultSeqToAdd = (Sequence) Sequences.map(resultSequence, new Function<Result<BySegmentResultValueClass<T>>, Result<BySegmentResultValueClass<T>>>() {

                            @Override
                            public Result<BySegmentResultValueClass<T>> apply(Result<BySegmentResultValueClass<T>> input) {
                                final BySegmentResultValueClass<T> bySegmentValue = input.getValue();
                                return new Result<>(input.getTimestamp(), new BySegmentResultValueClass<T>(Lists.transform(bySegmentValue.getResults(), toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())), bySegmentValue.getSegmentId(), bySegmentValue.getInterval()));
                            }
                        });
                    }
                } else {
                    // Requires some manipulation on broker side
                    @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence = clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                    resultSeqToAdd = new MergeSequence(query.getResultOrdering(), Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(runningSequence, new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {

                        private final Function<T, Object> cacheFn = strategy.prepareForCache();

                        // Actually do something with the results
                        @Override
                        public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
                            final BySegmentResultValueClass<T> value = input.getValue();
                            final CachePopulator cachePopulator = cachePopulatorMap.get(String.format("%s_%s", value.getSegmentId(), value.getInterval()));
                            final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();
                            return Sequences.<T>withEffect(Sequences.<T, T>map(Sequences.<T, T>map(Sequences.<T>simple(value.getResults()), new Function<T, T>() {

                                @Override
                                public T apply(final T input) {
                                    if (cachePopulator != null) {
                                        // only compute cache data if populating cache
                                        cacheFutures.add(backgroundExecutorService.submit(new Callable<Object>() {

                                            @Override
                                            public Object call() {
                                                return cacheFn.apply(input);
                                            }
                                        }));
                                    }
                                    return input;
                                }
                            }), toolChest.makePreComputeManipulatorFn(
                                // This casting is sub-optimal, but hasn't caused any major problems yet...
                                (Query) rewrittenQuery, MetricManipulatorFns.deserializing())), new Runnable() {

                                @Override
                                public void run() {
                                    if (cachePopulator != null) {
                                        Futures.addCallback(Futures.allAsList(cacheFutures), new FutureCallback<List<Object>>() {

                                            @Override
                                            public void onSuccess(List<Object> cacheData) {
                                                cachePopulator.populate(cacheData);
                                                // Help out GC by making sure all references are gone
                                                cacheFutures.clear();
                                            }

                                            @Override
                                            public void onFailure(Throwable throwable) {
                                                log.error(throwable, "Background caching failed");
                                            }
                                        }, backgroundExecutorService);
                                    }
                                }
                            }, MoreExecutors.sameThreadExecutor());
                        // End withEffect
                        }
                    }));
                }
                listOfSequences.add(resultSeqToAdd);
            }
        }
    });
}
Also used : BaseQuery(io.druid.query.BaseQuery) Query(io.druid.query.Query) ArrayList(java.util.ArrayList) ShardSpec(io.druid.timeline.partition.ShardSpec) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) Result(io.druid.query.Result) ServerSelector(io.druid.client.selector.ServerSelector) List(java.util.List) ArrayList(java.util.ArrayList) FutureCallback(com.google.common.util.concurrent.FutureCallback) Optional(com.google.common.base.Optional) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass) LazySequence(io.druid.java.util.common.guava.LazySequence) BaseSequence(io.druid.java.util.common.guava.BaseSequence) Sequence(io.druid.java.util.common.guava.Sequence) MergeSequence(io.druid.java.util.common.guava.MergeSequence) ImmutableMap(com.google.common.collect.ImmutableMap) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) Hasher(com.google.common.hash.Hasher) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Interval(org.joda.time.Interval) Cache(io.druid.client.cache.Cache) MultipleSpecificSegmentSpec(io.druid.query.spec.MultipleSpecificSegmentSpec) Function(com.google.common.base.Function) MergeSequence(io.druid.java.util.common.guava.MergeSequence) SegmentDescriptor(io.druid.query.SegmentDescriptor) Iterator(java.util.Iterator) PartitionChunk(io.druid.timeline.partition.PartitionChunk) TypeReference(com.fasterxml.jackson.core.type.TypeReference) Pair(io.druid.java.util.common.Pair) IOException(java.io.IOException) QueryRunner(io.druid.query.QueryRunner) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) LazySequence(io.druid.java.util.common.guava.LazySequence)
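
The TypeReference in this method comes from strategy.getCacheObjectClazz() and is handed to objectMapper.readValues(), which streams the cached byte[] back as individual objects. Below is a minimal sketch of that streaming read, assuming a byte[] of back-to-back JSON values and a Map-shaped cache entry; the real cache object type depends on the query's CacheStrategy.

import java.nio.charset.StandardCharsets;
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;

public class CachedSegmentReadSketch {

    public static void main(String[] args) throws Exception {
        // Stand-in for the byte[] pulled from the segment cache: JSON values
        // written back-to-back, not wrapped in a top-level array.
        byte[] cachedResult = "{\"rows\":1}{\"rows\":2}{\"rows\":3}".getBytes(StandardCharsets.UTF_8);
        ObjectMapper objectMapper = new ObjectMapper();
        // Plays the role of strategy.getCacheObjectClazz(): it tells readValues()
        // what each cached entry deserializes into.
        TypeReference<Map<String, Object>> cacheObjectClazz = new TypeReference<Map<String, Object>>() {};
        MappingIterator<Map<String, Object>> values =
                objectMapper.readValues(objectMapper.getFactory().createParser(cachedResult), cacheObjectClazz);
        while (values.hasNext()) {
            System.out.println(values.next());
        }
    }
}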

Example 9 with TypeReference

Use of com.fasterxml.jackson.core.type.TypeReference in the project druid by druid-io.

From the class CachingQueryRunner, method run:

@Override
public Sequence<T> run(Query<T> query, Map<String, Object> responseContext) {
    final CacheStrategy strategy = toolChest.getCacheStrategy(query);
    final boolean populateCache = CacheUtil.populateCacheOnDataNodes(query, strategy, cacheConfig);
    final boolean useCache = CacheUtil.useCacheOnDataNodes(query, strategy, cacheConfig);
    final Cache.NamedKey key;
    if (strategy != null && (useCache || populateCache)) {
        key = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor, strategy.computeCacheKey(query));
    } else {
        key = null;
    }
    if (useCache) {
        final Function cacheFn = strategy.pullFromCache();
        final byte[] cachedResult = cache.get(key);
        if (cachedResult != null) {
            final TypeReference cacheObjectClazz = strategy.getCacheObjectClazz();
            return Sequences.map(new BaseSequence<>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {

                @Override
                public Iterator<T> make() {
                    try {
                        if (cachedResult.length == 0) {
                            return Iterators.emptyIterator();
                        }
                        return mapper.readValues(mapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                    } catch (IOException e) {
                        throw Throwables.propagate(e);
                    }
                }

                @Override
                public void cleanup(Iterator<T> iterFromMake) {
                }
            }), cacheFn);
        }
    }
    final Collection<ListenableFuture<?>> cacheFutures = Collections.synchronizedList(Lists.<ListenableFuture<?>>newLinkedList());
    if (populateCache) {
        final Function cacheFn = strategy.prepareForCache();
        return Sequences.withEffect(Sequences.map(base.run(query, responseContext), new Function<T, T>() {

            @Override
            public T apply(final T input) {
                final SettableFuture<Object> future = SettableFuture.create();
                cacheFutures.add(future);
                backgroundExecutorService.submit(new Runnable() {

                    @Override
                    public void run() {
                        try {
                            future.set(cacheFn.apply(input));
                        } catch (Exception e) {
                            // if an exception occurs, call setException so the caching step is abandoned
                            future.setException(e);
                        }
                    }
                });
                return input;
            }
        }), new Runnable() {

            @Override
            public void run() {
                try {
                    CacheUtil.populate(cache, mapper, key, Futures.allAsList(cacheFutures).get());
                } catch (Exception e) {
                    log.error(e, "Error while getting future for cache task");
                    throw Throwables.propagate(e);
                }
            }
        }, backgroundExecutorService);
    } else {
        return base.run(query, responseContext);
    }
}
Also used : IOException(java.io.IOException) IOException(java.io.IOException) Function(com.google.common.base.Function) Iterator(java.util.Iterator) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) TypeReference(com.fasterxml.jackson.core.type.TypeReference) CacheStrategy(io.druid.query.CacheStrategy) Cache(io.druid.client.cache.Cache)
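
Beyond the cached-read path (the same readValues-with-TypeReference pattern as Example 8), the populateCache branch is worth isolating: each result row triggers a background serialization future, and the cache entry is written only after the whole sequence has been consumed. A rough sketch of that pattern with plain Guava futures, using invented names in place of the Druid cache classes:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

public class DeferredCachePopulationSketch {

    public static void main(String[] args) throws Exception {
        ExecutorService backgroundExecutorService = Executors.newFixedThreadPool(2);
        List<ListenableFuture<Object>> cacheFutures = new ArrayList<>();
        // As each result row flows through the sequence, serialize it off-thread.
        for (int row = 0; row < 3; row++) {
            final int value = row;
            final SettableFuture<Object> future = SettableFuture.create();
            cacheFutures.add(future);
            backgroundExecutorService.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        future.set("serialized-" + value); // stand-in for cacheFn.apply(input)
                    } catch (Exception e) {
                        future.setException(e);            // abandon caching on failure
                    }
                }
            });
        }
        // Once the sequence is exhausted, gather all rows and write one cache entry.
        List<Object> cacheData = Futures.allAsList(cacheFutures).get();
        System.out.println(cacheData);                     // stand-in for CacheUtil.populate(...)
        backgroundExecutorService.shutdown();
    }
}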

Example 10 with TypeReference

Use of com.fasterxml.jackson.core.type.TypeReference in the project druid by druid-io.

From the class DirectDruidClient, method run:

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
    QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    boolean isBySegment = BaseQuery.getContextBySegment(query, false);
    Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
    if (types == null) {
        final TypeFactory typeFactory = objectMapper.getTypeFactory();
        JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
        JavaType bySegmentType = typeFactory.constructParametricType(Result.class, typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType));
        types = Pair.of(baseType, bySegmentType);
        typesMap.put(query.getClass(), types);
    }
    final JavaType typeRef;
    if (isBySegment) {
        typeRef = types.rhs;
    } else {
        typeRef = types.lhs;
    }
    final ListenableFuture<InputStream> future;
    final String url = String.format("http://%s/druid/v2/", host);
    final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());
    try {
        log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
        final long requestStartTime = System.currentTimeMillis();
        final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
        builder.setDimension("server", host);
        final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {

            private long responseStartTime;

            private final AtomicLong byteCount = new AtomicLong(0);

            private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();

            private final AtomicBoolean done = new AtomicBoolean(false);

            @Override
            public ClientResponse<InputStream> handleResponse(HttpResponse response) {
                log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
                responseStartTime = System.currentTimeMillis();
                emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));
                try {
                    final String responseContext = response.headers().get("X-Druid-Response-Context");
                    // context may be null in case of error or query timeout
                    if (responseContext != null) {
                        context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext, new TypeReference<Map<String, Object>>() {
                        }));
                    }
                    queue.put(new ChannelBufferInputStream(response.getContent()));
                } catch (final IOException e) {
                    log.error(e, "Error parsing response context from url [%s]", url);
                    return ClientResponse.<InputStream>finished(new InputStream() {

                        @Override
                        public int read() throws IOException {
                            throw e;
                        }
                    });
                } catch (InterruptedException e) {
                    log.error(e, "Queue appending interrupted");
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                byteCount.addAndGet(response.getContent().readableBytes());
                return ClientResponse.<InputStream>finished(new SequenceInputStream(new Enumeration<InputStream>() {

                    @Override
                    public boolean hasMoreElements() {
                        // Keep supplying InputStreams until done is set and the queue has been drained.
                        synchronized (done) {
                            return !done.get() || !queue.isEmpty();
                        }
                    }

                    @Override
                    public InputStream nextElement() {
                        try {
                            return queue.take();
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            throw Throwables.propagate(e);
                        }
                    }
                }));
            }

            @Override
            public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk) {
                final ChannelBuffer channelBuffer = chunk.getContent();
                final int bytes = channelBuffer.readableBytes();
                if (bytes > 0) {
                    try {
                        queue.put(new ChannelBufferInputStream(channelBuffer));
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw Throwables.propagate(e);
                    }
                    byteCount.addAndGet(bytes);
                }
                return clientResponse;
            }

            @Override
            public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
                long stopTime = System.currentTimeMillis();
                log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, byteCount.get(), stopTime - responseStartTime, byteCount.get() / (0.001 * (stopTime - responseStartTime)));
                emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
                emitter.emit(builder.build("query/node/bytes", byteCount.get()));
                synchronized (done) {
                    try {
                        // An empty stream is put at the end so SequenceInputStream has something to close out
                        // after done is set to true, regardless of the rest of the stream's state.
                        queue.put(ByteSource.empty().openStream());
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw Throwables.propagate(e);
                    } catch (IOException e) {
                        // This should never happen
                        throw Throwables.propagate(e);
                    } finally {
                        done.set(true);
                    }
                }
                return ClientResponse.<InputStream>finished(clientResponse.getObj());
            }

            @Override
            public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
                // Don't wait for lock in case the lock had something to do with the error
                synchronized (done) {
                    done.set(true);
                    // Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
                    // If nothing is waiting on take(), this will be closed out anyways.
                    queue.offer(new InputStream() {

                        @Override
                        public int read() throws IOException {
                            throw new IOException(e);
                        }
                    });
                }
            }
        };
        future = httpClient.go(new Request(HttpMethod.POST, new URL(url)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), responseHandler);
        queryWatcher.registerQuery(query, future);
        openConnections.getAndIncrement();
        Futures.addCallback(future, new FutureCallback<InputStream>() {

            @Override
            public void onSuccess(InputStream result) {
                openConnections.getAndDecrement();
            }

            @Override
            public void onFailure(Throwable t) {
                openConnections.getAndDecrement();
                if (future.isCancelled()) {
                    // forward the cancellation to underlying queriable node
                    try {
                        StatusResponseHolder res = httpClient.go(new Request(HttpMethod.DELETE, new URL(cancelUrl)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), new StatusResponseHandler(Charsets.UTF_8)).get();
                        if (res.getStatus().getCode() >= 500) {
                            throw new RE("Error cancelling query[%s]: queriable node returned status[%d] [%s].", query.getId(), res.getStatus().getCode(), res.getStatus().getReasonPhrase());
                        }
                    } catch (IOException | ExecutionException | InterruptedException e) {
                        Throwables.propagate(e);
                    }
                }
            }
        });
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {

        @Override
        public JsonParserIterator<T> make() {
            return new JsonParserIterator<T>(typeRef, future, url);
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake) {
            CloseQuietly.close(iterFromMake);
        }
    });
    // avoid the cost of de-serializing and then re-serializing again when adding to cache
    if (!isBySegment) {
        retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
    }
    return retVal;
}
Also used : ClientResponse(com.metamx.http.client.response.ClientResponse) BaseQuery(io.druid.query.BaseQuery) Query(io.druid.query.Query) QueryInterruptedException(io.druid.query.QueryInterruptedException) URL(java.net.URL) ChannelBuffer(org.jboss.netty.buffer.ChannelBuffer) StatusResponseHolder(com.metamx.http.client.response.StatusResponseHolder) TypeReference(com.fasterxml.jackson.core.type.TypeReference) ChannelBufferInputStream(org.jboss.netty.buffer.ChannelBufferInputStream) BlockingQueue(java.util.concurrent.BlockingQueue) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Enumeration(java.util.Enumeration) ChannelBufferInputStream(org.jboss.netty.buffer.ChannelBufferInputStream) SequenceInputStream(java.io.SequenceInputStream) InputStream(java.io.InputStream) Request(com.metamx.http.client.Request) BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass) HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse) IOException(java.io.IOException) BaseSequence(io.druid.java.util.common.guava.BaseSequence) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) JavaType(com.fasterxml.jackson.databind.JavaType) AtomicLong(java.util.concurrent.atomic.AtomicLong) RE(io.druid.java.util.common.RE) SequenceInputStream(java.io.SequenceInputStream) ServiceMetricEvent(com.metamx.emitter.service.ServiceMetricEvent) StatusResponseHandler(com.metamx.http.client.response.StatusResponseHandler) TypeFactory(com.fasterxml.jackson.databind.type.TypeFactory) HttpResponseHandler(com.metamx.http.client.response.HttpResponseHandler) HttpChunk(org.jboss.netty.handler.codec.http.HttpChunk)
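
Two TypeReference-related moves are bundled into the setup of this method: the per-query result TypeReference from toolChest.getResultTypeReference() is converted into a Jackson JavaType, and a parameterized bySegment wrapper type is then built around it at runtime. A small sketch of that conversion, using List<String> as a stand-in row type and List as a stand-in wrapper, since Result and BySegmentResultValueClass are Druid classes:

import java.util.List;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.TypeFactory;

public class JavaTypeFromTypeReferenceSketch {

    public static void main(String[] args) {
        final TypeFactory typeFactory = new ObjectMapper().getTypeFactory();
        // Stand-in for toolChest.getResultTypeReference(): the row type of a query result.
        TypeReference<List<String>> resultTypeReference = new TypeReference<List<String>>() {};
        JavaType baseType = typeFactory.constructType(resultTypeReference);
        // Mirrors constructParametricType(Result.class, constructParametricType(BySegmentResultValueClass.class, baseType)),
        // here simply nesting the row type inside another List for illustration.
        JavaType bySegmentLikeType = typeFactory.constructParametricType(List.class, baseType);
        System.out.println(baseType);
        System.out.println(bySegmentLikeType);
    }
}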

Aggregations

TypeReference (com.fasterxml.jackson.core.type.TypeReference): 67 usages
IOException (java.io.IOException): 27 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 25 usages
Test (org.junit.Test): 23 usages
Map (java.util.Map): 13 usages
BaseMandrillAnonymousListResponse (com.cribbstechnologies.clients.mandrill.model.response.BaseMandrillAnonymousListResponse): 7 usages
StringWriter (java.io.StringWriter): 7 usages
ArrayList (java.util.ArrayList): 7 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 6 usages
HashMap (java.util.HashMap): 6 usages
InputStream (java.io.InputStream): 5 usages
List (java.util.List): 5 usages
SimpleModule (com.fasterxml.jackson.databind.module.SimpleModule): 4 usages
AuditInfo (io.druid.audit.AuditInfo): 4 usages
Interval (org.joda.time.Interval): 4 usages
JsonParseException (com.fasterxml.jackson.core.JsonParseException): 3 usages
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 3 usages
JsonMappingException (com.fasterxml.jackson.databind.JsonMappingException): 3 usages
JsonNode (com.fasterxml.jackson.databind.JsonNode): 3 usages
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 3 usages