
Example 1 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.

From the class RemoteTaskRunner, method start().

@Override
@LifecycleStart
public void start() {
    try {
        if (started) {
            return;
        }
        final MutableInt waitingFor = new MutableInt(1);
        final Object waitingForMonitor = new Object();
        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {

            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event) throws Exception {
                final Worker worker;
                switch(event.getType()) {
                    case CHILD_ADDED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        synchronized (waitingForMonitor) {
                            waitingFor.increment();
                        }
                        Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {

                            @Override
                            public void onSuccess(ZkWorker zkWorker) {
                                synchronized (waitingForMonitor) {
                                    waitingFor.decrement();
                                    waitingForMonitor.notifyAll();
                                }
                            }

                            @Override
                            public void onFailure(Throwable throwable) {
                                synchronized (waitingForMonitor) {
                                    waitingFor.decrement();
                                    waitingForMonitor.notifyAll();
                                }
                            }
                        });
                        break;
                    case CHILD_UPDATED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        updateWorker(worker);
                        break;
                    case CHILD_REMOVED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        removeWorker(worker);
                        break;
                    case INITIALIZED:
                        // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
                        List<String> workers;
                        try {
                            workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
                        } catch (KeeperException.NoNodeException e) {
                            // statusPath doesn't exist yet; can occur if no middleManagers have started.
                            workers = ImmutableList.of();
                        }
                        for (String workerId : workers) {
                            final String workerAnnouncePath = JOINER.join(indexerZkConfig.getAnnouncementsPath(), workerId);
                            final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), workerId);
                            if (!zkWorkers.containsKey(workerId) && cf.checkExists().forPath(workerAnnouncePath) == null) {
                                try {
                                    scheduleTasksCleanupForWorker(workerId, cf.getChildren().forPath(workerStatusPath));
                                } catch (Exception e) {
                                    log.warn(e, "Could not schedule cleanup for worker[%s] during startup (maybe someone removed the status znode[%s]?). Skipping.", workerId, workerStatusPath);
                                }
                            }
                        }
                        synchronized (waitingForMonitor) {
                            waitingFor.decrement();
                            waitingForMonitor.notifyAll();
                        }
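                        // no break here: execution falls through to the empty default case below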
                    default:
                        break;
                }
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }
        scheduleBlackListedNodesCleanUp();
        resourceManagement.startManagement(this);
        started = true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Also used : PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) KeeperException(org.apache.zookeeper.KeeperException) MalformedURLException(java.net.MalformedURLException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) CuratorFramework(org.apache.curator.framework.CuratorFramework) MutableInt(org.apache.commons.lang.mutable.MutableInt) Worker(io.druid.indexing.worker.Worker) List(java.util.List) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ImmutableList(com.google.common.collect.ImmutableList) FutureCallback(com.google.common.util.concurrent.FutureCallback) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)
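
The core of this example is a counter guarded by a monitor: it is incremented for each addWorker() future and decremented in both onSuccess and onFailure, so start() can block until every outstanding future has resolved one way or the other. Below is a minimal, self-contained sketch of that coordination pattern; the class and method names are illustrative rather than Druid's, and it assumes a Guava version that offers MoreExecutors.directExecutor() (older versions use sameThreadExecutor(), as in the listings on this page).

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class StartupCoordinationSketch {
    private final Object waitingForMonitor = new Object();
    // starts at 1; the extra count stands in for the INITIALIZED-style event
    private int waitingFor = 1;

    public void awaitAll(Iterable<ListenableFuture<String>> workerFutures) throws InterruptedException {
        for (ListenableFuture<String> future : workerFutures) {
            synchronized (waitingForMonitor) {
                waitingFor++;
            }
            Futures.addCallback(future, new FutureCallback<String>() {
                @Override
                public void onSuccess(String result) {
                    release();
                }

                @Override
                public void onFailure(Throwable throwable) {
                    release(); // count down on failure too, or the waiter below would hang
                }
            }, MoreExecutors.directExecutor());
        }
        release(); // stands in for the INITIALIZED event releasing the initial count
        synchronized (waitingForMonitor) {
            while (waitingFor > 0) {
                waitingForMonitor.wait();
            }
        }
    }

    private void release() {
        synchronized (waitingForMonitor) {
            waitingFor--;
            waitingForMonitor.notifyAll();
        }
    }
}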

Example 2 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.

From the class CachingClusteredClient, method run().

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);
    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();
    final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();
    final boolean useCache = CacheUtil.useCacheOnBrokers(query, strategy, cacheConfig);
    final boolean populateCache = CacheUtil.populateCacheOnBrokers(query, strategy, cacheConfig);
    final boolean isBySegment = BaseQuery.getContextBySegment(query, false);
    final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();
    final int priority = BaseQuery.getContextPriority(query, 0);
    contextBuilder.put("priority", priority);
    if (populateCache) {
        // prevent down-stream nodes from caching results as well if we are populating the cache
        contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
        contextBuilder.put("bySegment", true);
    }
    TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
        return Sequences.empty();
    }
    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();
    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();
    // Note that enabling this leads to putting uncovered intervals information in the response headers
    // and might blow up in some cases https://github.com/druid-io/druid/issues/2108
    int uncoveredIntervalsLimit = BaseQuery.getContextUncoveredIntervalsLimit(query, 0);
    if (uncoveredIntervalsLimit > 0) {
        List<Interval> uncoveredIntervals = Lists.newArrayListWithCapacity(uncoveredIntervalsLimit);
        boolean uncoveredIntervalsOverflowed = false;
        for (Interval interval : query.getIntervals()) {
            Iterable<TimelineObjectHolder<String, ServerSelector>> lookup = timeline.lookup(interval);
            long startMillis = interval.getStartMillis();
            long endMillis = interval.getEndMillis();
            for (TimelineObjectHolder<String, ServerSelector> holder : lookup) {
                Interval holderInterval = holder.getInterval();
                long intervalStart = holderInterval.getStartMillis();
                if (!uncoveredIntervalsOverflowed && startMillis != intervalStart) {
                    if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                        uncoveredIntervals.add(new Interval(startMillis, intervalStart));
                    } else {
                        uncoveredIntervalsOverflowed = true;
                    }
                }
                startMillis = holderInterval.getEndMillis();
                serversLookup.add(holder);
            }
            if (!uncoveredIntervalsOverflowed && startMillis < endMillis) {
                if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
                    uncoveredIntervals.add(new Interval(startMillis, endMillis));
                } else {
                    uncoveredIntervalsOverflowed = true;
                }
            }
        }
        if (!uncoveredIntervals.isEmpty()) {
            // This returns intervals for which NO segment is present.
            // Which is not necessarily an indication that the data doesn't exist or is
            // incomplete. The data could exist and just not be loaded yet.  In either
            // case, though, this query will not include any data from the identified intervals.
            responseContext.put("uncoveredIntervals", uncoveredIntervals);
            responseContext.put("uncoveredIntervalsOverflowed", uncoveredIntervalsOverflowed);
        }
    } else {
        for (Interval interval : query.getIntervals()) {
            Iterables.addAll(serversLookup, timeline.lookup(interval));
        }
    }
    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup = toolChest.filterSegments(query, serversLookup);
    Map<String, Optional<RangeSet<String>>> dimensionRangeCache = Maps.newHashMap();
    // Filter unneeded chunks based on partition dimension
    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
        final Set<PartitionChunk<ServerSelector>> filteredChunks = DimFilterUtils.filterShards(query.getFilter(), holder.getObject(), new Function<PartitionChunk<ServerSelector>, ShardSpec>() {

            @Override
            public ShardSpec apply(PartitionChunk<ServerSelector> input) {
                return input.getObject().getSegment().getShardSpec();
            }
        }, dimensionRangeCache);
        for (PartitionChunk<ServerSelector> chunk : filteredChunks) {
            ServerSelector selector = chunk.getObject();
            final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
            segments.add(Pair.of(selector, descriptor));
        }
    }
    final byte[] queryCacheKey;
    // (populateCache || useCache) implies strategy != null; explicit bySegment queries are never cached
    if ((populateCache || useCache) && !isBySegment) {
        queryCacheKey = strategy.computeCacheKey(query);
    } else {
        queryCacheKey = null;
    }
    if (query.getContext().get(QueryResource.HDR_IF_NONE_MATCH) != null) {
        String prevEtag = (String) query.getContext().get(QueryResource.HDR_IF_NONE_MATCH);
        //compute current Etag
        Hasher hasher = Hashing.sha1().newHasher();
        boolean hasOnlyHistoricalSegments = true;
        for (Pair<ServerSelector, SegmentDescriptor> p : segments) {
            if (!p.lhs.pick().getServer().isAssignable()) {
                hasOnlyHistoricalSegments = false;
                break;
            }
            hasher.putString(p.lhs.getSegment().getIdentifier(), Charsets.UTF_8);
        }
        if (hasOnlyHistoricalSegments) {
            hasher.putBytes(queryCacheKey == null ? strategy.computeCacheKey(query) : queryCacheKey);
            String currEtag = Base64.encodeBase64String(hasher.hash().asBytes());
            responseContext.put(QueryResource.HDR_ETAG, currEtag);
            if (prevEtag.equals(currEtag)) {
                return Sequences.empty();
            }
        }
    }
    if (queryCacheKey != null) {
        // cacheKeys map must preserve segment ordering, in order for shards to always be combined in the same order
        Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newLinkedHashMap();
        for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
            final Cache.NamedKey segmentCacheKey = CacheUtil.computeSegmentCacheKey(segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
            cacheKeys.put(segment, segmentCacheKey);
        }
        // Pull cached segments from cache and remove from set of segments to query
        final Map<Cache.NamedKey, byte[]> cachedValues;
        if (useCache) {
            cachedValues = cache.getBulk(Iterables.limit(cacheKeys.values(), cacheConfig.getCacheBulkMergeLimit()));
        } else {
            cachedValues = ImmutableMap.of();
        }
        for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
            Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
            Cache.NamedKey segmentCacheKey = entry.getValue();
            final Interval segmentQueryInterval = segment.rhs.getInterval();
            final byte[] cachedValue = cachedValues.get(segmentCacheKey);
            if (cachedValue != null) {
                // remove cached segment from set of segments to query
                segments.remove(segment);
                cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
            } else if (populateCache) {
                // otherwise, if populating cache, add segment to list of segments to cache
                final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
                cachePopulatorMap.put(String.format("%s_%s", segmentIdentifier, segmentQueryInterval), new CachePopulator(cache, objectMapper, segmentCacheKey));
            }
        }
    }
    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final QueryableDruidServer queryableDruidServer = segment.lhs.pick();
        if (queryableDruidServer == null) {
            log.makeAlert("No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!", segment.rhs, query.getDataSource()).emit();
        } else {
            final DruidServer server = queryableDruidServer.getServer();
            List<SegmentDescriptor> descriptors = serverSegments.get(server);
            if (descriptors == null) {
                descriptors = Lists.newArrayList();
                serverSegments.put(server, descriptors);
            }
            descriptors.add(segment.rhs);
        }
    }
    return new LazySequence<>(new Supplier<Sequence<T>>() {

        @Override
        public Sequence<T> get() {
            ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
            addSequencesFromCache(sequencesByInterval);
            addSequencesFromServer(sequencesByInterval);
            return mergeCachedAndUncachedSequences(query, sequencesByInterval);
        }

        private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
            if (strategy == null) {
                return;
            }
            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
                final byte[] cachedResult = cachedResultPair.rhs;
                Sequence<Object> cachedSequence = new BaseSequence<>(new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {

                    @Override
                    public Iterator<Object> make() {
                        try {
                            if (cachedResult.length == 0) {
                                return Iterators.emptyIterator();
                            }
                            return objectMapper.readValues(objectMapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                        } catch (IOException e) {
                            throw Throwables.propagate(e);
                        }
                    }

                    @Override
                    public void cleanup(Iterator<Object> iterFromMake) {
                    }
                });
                listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
            }
        }

        private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
            listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());
            final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());
            // The data gets handled as a Future and parsed in the long Sequence chain in the resultSeqToAdd setter.
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry : serverSegments.entrySet()) {
                final DruidServer server = entry.getKey();
                final List<SegmentDescriptor> descriptors = entry.getValue();
                final QueryRunner clientQueryable = serverView.getQueryRunner(server);
                if (clientQueryable == null) {
                    log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
                    continue;
                }
                final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors);
                final Sequence<T> resultSeqToAdd;
                if (!server.isAssignable() || !populateCache || isBySegment) {
                    // Direct server queryable
                    if (!isBySegment) {
                        resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec), responseContext);
                    } else {
                        // bySegment queries need to be de-serialized, see DirectDruidClient.run()
                        @SuppressWarnings("unchecked") final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery = (Query<Result<BySegmentResultValueClass<T>>>) ((Query) query);
                        @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence = clientQueryable.run(bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                        resultSeqToAdd = (Sequence) Sequences.map(resultSequence, new Function<Result<BySegmentResultValueClass<T>>, Result<BySegmentResultValueClass<T>>>() {

                            @Override
                            public Result<BySegmentResultValueClass<T>> apply(Result<BySegmentResultValueClass<T>> input) {
                                final BySegmentResultValueClass<T> bySegmentValue = input.getValue();
                                return new Result<>(input.getTimestamp(), new BySegmentResultValueClass<T>(Lists.transform(bySegmentValue.getResults(), toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())), bySegmentValue.getSegmentId(), bySegmentValue.getInterval()));
                            }
                        });
                    }
                } else {
                    // Requires some manipulation on broker side
                    @SuppressWarnings("unchecked") final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence = clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                    resultSeqToAdd = new MergeSequence(query.getResultOrdering(), Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(runningSequence, new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {

                        private final Function<T, Object> cacheFn = strategy.prepareForCache();

                        // Actually do something with the results
                        @Override
                        public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
                            final BySegmentResultValueClass<T> value = input.getValue();
                            final CachePopulator cachePopulator = cachePopulatorMap.get(String.format("%s_%s", value.getSegmentId(), value.getInterval()));
                            final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();
                            return Sequences.<T>withEffect(Sequences.<T, T>map(Sequences.<T, T>map(Sequences.<T>simple(value.getResults()), new Function<T, T>() {

                                @Override
                                public T apply(final T input) {
                                    if (cachePopulator != null) {
                                        // only compute cache data if populating cache
                                        cacheFutures.add(backgroundExecutorService.submit(new Callable<Object>() {

                                            @Override
                                            public Object call() {
                                                return cacheFn.apply(input);
                                            }
                                        }));
                                    }
                                    return input;
                                }
                            }), toolChest.makePreComputeManipulatorFn(
                                // This casting is sub-optimal, but hasn't caused any major problems yet...
                                (Query) rewrittenQuery, MetricManipulatorFns.deserializing())), new Runnable() {

                                @Override
                                public void run() {
                                    if (cachePopulator != null) {
                                        Futures.addCallback(Futures.allAsList(cacheFutures), new FutureCallback<List<Object>>() {

                                            @Override
                                            public void onSuccess(List<Object> cacheData) {
                                                cachePopulator.populate(cacheData);
                                                // Help out GC by making sure all references are gone
                                                cacheFutures.clear();
                                            }

                                            @Override
                                            public void onFailure(Throwable throwable) {
                                                log.error(throwable, "Background caching failed");
                                            }
                                        }, backgroundExecutorService);
                                    }
                                }
                            }, MoreExecutors.sameThreadExecutor()); // End withEffect
                        }
                    }));
                }
                listOfSequences.add(resultSeqToAdd);
            }
        }
    });
}
Also used : BaseQuery(io.druid.query.BaseQuery) Query(io.druid.query.Query) ArrayList(java.util.ArrayList) ShardSpec(io.druid.timeline.partition.ShardSpec) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) Result(io.druid.query.Result) ServerSelector(io.druid.client.selector.ServerSelector) List(java.util.List) ArrayList(java.util.ArrayList) FutureCallback(com.google.common.util.concurrent.FutureCallback) Optional(com.google.common.base.Optional) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass) LazySequence(io.druid.java.util.common.guava.LazySequence) BaseSequence(io.druid.java.util.common.guava.BaseSequence) Sequence(io.druid.java.util.common.guava.Sequence) MergeSequence(io.druid.java.util.common.guava.MergeSequence) ImmutableMap(com.google.common.collect.ImmutableMap) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) Hasher(com.google.common.hash.Hasher) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Interval(org.joda.time.Interval) Cache(io.druid.client.cache.Cache) MultipleSpecificSegmentSpec(io.druid.query.spec.MultipleSpecificSegmentSpec) Function(com.google.common.base.Function) MergeSequence(io.druid.java.util.common.guava.MergeSequence) SegmentDescriptor(io.druid.query.SegmentDescriptor) Iterator(java.util.Iterator) PartitionChunk(io.druid.timeline.partition.PartitionChunk) TypeReference(com.fasterxml.jackson.core.type.TypeReference) Pair(io.druid.java.util.common.Pair) IOException(java.io.IOException) QueryRunner(io.druid.query.QueryRunner) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) LazySequence(io.druid.java.util.common.guava.LazySequence)
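
The FutureCallback in this example drives background cache population: each result is transformed on a background executor, the per-result futures are collected in a queue, and Futures.allAsList plus a callback write to the cache only once all of them have completed. A condensed, hypothetical sketch of just that step follows; the populate() sink and the String row type are stand-ins, not Druid's CachePopulator or result types.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;

public class BackgroundCachePopulationSketch {
    private final ListeningExecutorService backgroundExecutorService =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

    public void cacheAsync(List<String> rows) {
        final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();
        for (final String row : rows) {
            // compute the cacheable form of each row off the query thread
            cacheFutures.add(backgroundExecutorService.submit(new Callable<Object>() {
                @Override
                public Object call() {
                    return row.getBytes(); // stand-in for strategy.prepareForCache()
                }
            }));
        }
        Futures.addCallback(Futures.allAsList(cacheFutures), new FutureCallback<List<Object>>() {
            @Override
            public void onSuccess(List<Object> cacheData) {
                populate(cacheData);  // stand-in for cachePopulator.populate(cacheData)
                cacheFutures.clear(); // drop references so the GC can reclaim them
            }

            @Override
            public void onFailure(Throwable throwable) {
                // background caching is best-effort: log and move on
                System.err.println("Background caching failed: " + throwable);
            }
        }, backgroundExecutorService);
    }

    private void populate(List<Object> cacheData) {
        // hypothetical sink; the real code writes entries through Cache.NamedKey
    }
}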

Example 3 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.

From the class FiniteAppenderatorDriver, method publishAll().

/**
   * Push and publish all segments to the metadata store.
   *
   * @param publisher        segment publisher
   * @param wrappedCommitter wrapped committer (from wrapCommitter)
   *
   * @return published segments and metadata, or null if segments could not be published due to transaction failure
   * with commit metadata.
   */
private SegmentsAndMetadata publishAll(final TransactionalSegmentPublisher publisher, final Committer wrappedCommitter) throws InterruptedException {
    final List<SegmentIdentifier> theSegments = ImmutableList.copyOf(appenderator.getSegments());
    long nTry = 0;
    while (true) {
        try {
            log.info("Pushing segments: [%s]", Joiner.on(", ").join(theSegments));
            final SegmentsAndMetadata segmentsAndMetadata = appenderator.push(theSegments, wrappedCommitter).get();
            // Sanity check
            if (!segmentsToIdentifiers(segmentsAndMetadata.getSegments()).equals(Sets.newHashSet(theSegments))) {
                throw new ISE("WTF?! Pushed different segments than requested. Pushed[%s], requested[%s].", Joiner.on(", ").join(identifiersToStrings(segmentsToIdentifiers(segmentsAndMetadata.getSegments()))), Joiner.on(", ").join(identifiersToStrings(theSegments)));
            }
            log.info("Publishing segments with commitMetadata[%s]: [%s]", segmentsAndMetadata.getCommitMetadata(), Joiner.on(", ").join(segmentsAndMetadata.getSegments()));
            if (segmentsAndMetadata.getSegments().isEmpty()) {
                log.info("Nothing to publish, skipping publish step.");
            } else {
                final boolean published = publisher.publishSegments(ImmutableSet.copyOf(segmentsAndMetadata.getSegments()), ((FiniteAppenderatorDriverMetadata) segmentsAndMetadata.getCommitMetadata()).getCallerMetadata());
                if (published) {
                    log.info("Published segments, awaiting handoff.");
                } else {
                    log.info("Transaction failure while publishing segments, checking if someone else beat us to it.");
                    if (usedSegmentChecker.findUsedSegments(segmentsToIdentifiers(segmentsAndMetadata.getSegments())).equals(Sets.newHashSet(segmentsAndMetadata.getSegments()))) {
                        log.info("Our segments really do exist, awaiting handoff.");
                    } else {
                        log.warn("Our segments don't exist, giving up.");
                        return null;
                    }
                }
            }
            for (final DataSegment dataSegment : segmentsAndMetadata.getSegments()) {
                handoffNotifier.registerSegmentHandoffCallback(new SegmentDescriptor(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment.getShardSpec().getPartitionNum()), MoreExecutors.sameThreadExecutor(), new Runnable() {

                    @Override
                    public void run() {
                        final SegmentIdentifier identifier = SegmentIdentifier.fromDataSegment(dataSegment);
                        log.info("Segment[%s] successfully handed off, dropping.", identifier);
                        metrics.incrementHandOffCount();
                        final ListenableFuture<?> dropFuture = appenderator.drop(identifier);
                        Futures.addCallback(dropFuture, new FutureCallback<Object>() {

                            @Override
                            public void onSuccess(Object result) {
                                synchronized (handoffMonitor) {
                                    handoffMonitor.notifyAll();
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                log.warn(e, "Failed to drop segment[%s]?!");
                                synchronized (handoffMonitor) {
                                    handoffMonitor.notifyAll();
                                }
                            }
                        });
                    }
                });
            }
            return segmentsAndMetadata;
        } catch (InterruptedException e) {
            throw e;
        } catch (Exception e) {
            final long sleepMillis = computeNextRetrySleep(++nTry);
            log.warn(e, "Failed publishAll (try %d), retrying in %,dms.", nTry, sleepMillis);
            Thread.sleep(sleepMillis);
        }
    }
}
Also used : DataSegment(io.druid.timeline.DataSegment) IOException(java.io.IOException) SegmentDescriptor(io.druid.query.SegmentDescriptor) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ISE(io.druid.java.util.common.ISE) FutureCallback(com.google.common.util.concurrent.FutureCallback)
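
The callback on the drop future exists mainly to wake up whoever is blocked on handoffMonitor, and it does so on both success and failure: skipping the notification in onFailure would leave the driver waiting forever. A stripped-down, hypothetical sketch of that drop-and-notify step (names are illustrative, not Druid's):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class DropAndNotifySketch {
    private final Object handoffMonitor = new Object();
    private boolean dropFinished;

    public void dropAndNotify(final String identifier, ListenableFuture<?> dropFuture) {
        Futures.addCallback(dropFuture, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                wake();
            }

            @Override
            public void onFailure(Throwable e) {
                System.err.println("Failed to drop segment[" + identifier + "]: " + e);
                wake(); // notify even on failure, otherwise awaitDrop() never returns
            }
        }, MoreExecutors.directExecutor());
    }

    public void awaitDrop() throws InterruptedException {
        synchronized (handoffMonitor) {
            while (!dropFinished) {
                handoffMonitor.wait();
            }
        }
    }

    private void wake() {
        synchronized (handoffMonitor) {
            dropFinished = true;
            handoffMonitor.notifyAll();
        }
    }
}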

Example 4 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project crate by crate.

From the class BulkShardProcessor, method createPendingIndices().

private void createPendingIndices() {
    final List<PendingRequest> pendings = new ArrayList<>();
    final Set<String> indices;
    synchronized (requestsForNewIndices) {
        indices = ImmutableSet.copyOf(Iterables.filter(Sets.difference(requestsForNewIndices.keySet(), indicesCreated), shouldAutocreateIndexPredicate));
        for (Map.Entry<String, List<PendingRequest>> entry : requestsForNewIndices.entrySet()) {
            pendings.addAll(entry.getValue());
        }
        requestsForNewIndices.clear();
        pendingNewIndexRequests.set(0);
    }
    if (pendings.size() > 0 || indices.size() > 0) {
        LOGGER.debug("create {} pending indices in bulk...", indices.size());
        BulkCreateIndicesRequest bulkCreateIndicesRequest = new BulkCreateIndicesRequest(indices, jobId);
        final FutureCallback<Void> indicesCreatedCallback = new FutureCallback<Void>() {

            @Override
            public void onSuccess(@Nullable Void result) {
                if (failure.get() != null) {
                    return;
                }
                trace("applying pending requests for created indices...");
                Iterator<PendingRequest> it = pendings.iterator();
                while (it.hasNext()) {
                    PendingRequest pendingRequest = it.next();
                    // add pending requests for created indices
                    ShardId shardId = shardId(pendingRequest.indexName, pendingRequest.item.id(), pendingRequest.routing);
                    if (shardId == null) {
                        // the index appears to have been deleted in the meantime; mark the item as failed and remove it from pending
                        indicesDeleted.add(pendingRequest.indexName);
                        it.remove();
                        synchronized (responsesLock) {
                            responses.set(globalCounter.getAndIncrement(), false);
                        }
                        setResultIfDone(1);
                        continue;
                    }
                    partitionRequestByShard(shardId, pendingRequest.item, pendingRequest.routing);
                }
                trace("added %d pending requests, lets see if we can execute them", pendings.size());
                executeRequestsIfNeeded();
            }

            @Override
            public void onFailure(Throwable t) {
                setFailure(t);
            }
        };
        if (indices.isEmpty()) {
            indicesCreatedCallback.onSuccess(null);
        } else {
            transportBulkCreateIndicesAction.execute(bulkCreateIndicesRequest, new ActionListener<BulkCreateIndicesResponse>() {

                @Override
                public void onResponse(BulkCreateIndicesResponse response) {
                    indicesCreated.addAll(indices);
                    indicesCreatedCallback.onSuccess(null);
                }

                @Override
                public void onFailure(Throwable t) {
                    indicesCreatedCallback.onFailure(t);
                }
            });
        }
    }
}
Also used : BulkCreateIndicesResponse(org.elasticsearch.action.admin.indices.create.BulkCreateIndicesResponse) ShardId(org.elasticsearch.index.shard.ShardId) BulkCreateIndicesRequest(org.elasticsearch.action.admin.indices.create.BulkCreateIndicesRequest) FutureCallback(com.google.common.util.concurrent.FutureCallback) Nullable(javax.annotation.Nullable)
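
What is worth noticing here is that indicesCreatedCallback doubles as a continuation: when no indices need creating it is invoked inline via onSuccess(null), and otherwise it is attached to the asynchronous create-indices call. A hypothetical sketch of that shape, assuming the create step is exposed as a ListenableFuture rather than the Elasticsearch ActionListener used above:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Set;

public class CreateThenApplySketch {

    public void createPendingIndices(Set<String> indices, ListenableFuture<Void> createIndicesFuture) {
        final FutureCallback<Void> indicesCreatedCallback = new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                applyPendingRequests(); // run the continuation once the indices exist
            }

            @Override
            public void onFailure(Throwable t) {
                fail(t);
            }
        };

        if (indices.isEmpty()) {
            // nothing to create: call the continuation synchronously
            indicesCreatedCallback.onSuccess(null);
        } else {
            // otherwise attach it to the asynchronous create-indices call
            Futures.addCallback(createIndicesFuture, indicesCreatedCallback, MoreExecutors.directExecutor());
        }
    }

    private void applyPendingRequests() {
        // hypothetical: route the buffered requests to their shards
    }

    private void fail(Throwable t) {
        // hypothetical: record the failure and stop processing
    }
}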

Example 5 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project cassandra by apache.

From the class LocalSessions, method handlePrepareMessage().

/**
     * The PrepareConsistentRequest effectively promotes the parent repair session to a consistent
     * incremental session, and begins the 'pending anti-compaction' which moves all sstable data
     * that is to be repaired into its own silo, preventing it from mixing with other data.
     *
     * No response is sent to the repair coordinator until the pending anti-compaction has completed
     * successfully. If the pending anti-compaction fails, a failure message is sent to the coordinator,
     * cancelling the session.
     */
public void handlePrepareMessage(InetAddress from, PrepareConsistentRequest request) {
    logger.debug("received {} from {}", request, from);
    UUID sessionID = request.parentSession;
    InetAddress coordinator = request.coordinator;
    Set<InetAddress> peers = request.participants;
    ActiveRepairService.ParentRepairSession parentSession;
    try {
        parentSession = getParentRepairSession(sessionID);
    } catch (Throwable e) {
        logger.debug("Error retrieving ParentRepairSession for session {}, responding with failure", sessionID);
        sendMessage(coordinator, new FailSession(sessionID));
        return;
    }
    LocalSession session = createSessionUnsafe(sessionID, parentSession, peers);
    putSessionUnsafe(session);
    logger.debug("created local session for {}", sessionID);
    ExecutorService executor = Executors.newFixedThreadPool(parentSession.getColumnFamilyStores().size());
    ListenableFuture pendingAntiCompaction = submitPendingAntiCompaction(session, executor);
    Futures.addCallback(pendingAntiCompaction, new FutureCallback() {

        public void onSuccess(@Nullable Object result) {
            logger.debug("pending anti-compaction for {} completed", sessionID);
            setStateAndSave(session, PREPARED);
            sendMessage(coordinator, new PrepareConsistentResponse(sessionID, getBroadcastAddress(), true));
            executor.shutdown();
        }

        public void onFailure(Throwable t) {
            logger.debug("pending anti-compaction for {} failed", sessionID);
            failSession(sessionID);
            executor.shutdown();
        }
    });
}
Also used : ActiveRepairService(org.apache.cassandra.service.ActiveRepairService) PrepareConsistentResponse(org.apache.cassandra.repair.messages.PrepareConsistentResponse) FailSession(org.apache.cassandra.repair.messages.FailSession) ExecutorService(java.util.concurrent.ExecutorService) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) UUID(java.util.UUID) InetAddress(java.net.InetAddress) FutureCallback(com.google.common.util.concurrent.FutureCallback)
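
The shape here is a plain submit-then-callback: the pending anti-compaction runs on its own executor, only the callback decides whether to report success or fail the session, and the executor is shut down on both paths. A minimal, hypothetical sketch of that structure (names are illustrative, not Cassandra's):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class PrepareSessionSketch {

    public void handlePrepare(final UUID sessionID) {
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
        ListenableFuture<Boolean> pendingAntiCompaction = executor.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() {
                return runAntiCompaction(sessionID); // hypothetical long-running work
            }
        });
        Futures.addCallback(pendingAntiCompaction, new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(Boolean result) {
                reportPrepared(sessionID); // stand-in for setStateAndSave + sendMessage
                executor.shutdown();
            }

            @Override
            public void onFailure(Throwable t) {
                reportFailed(sessionID);   // stand-in for failSession(sessionID)
                executor.shutdown();
            }
        }, MoreExecutors.directExecutor());
    }

    private Boolean runAntiCompaction(UUID sessionID) {
        return Boolean.TRUE; // hypothetical placeholder for the anti-compaction work
    }

    private void reportPrepared(UUID sessionID) {
        // hypothetical: respond to the coordinator with success
    }

    private void reportFailed(UUID sessionID) {
        // hypothetical: fail the session and notify the coordinator
    }
}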

Aggregations

FutureCallback (com.google.common.util.concurrent.FutureCallback): 70 usages
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 33 usages
List (java.util.List): 33 usages
ArrayList (java.util.ArrayList): 27 usages
Futures (com.google.common.util.concurrent.Futures): 24 usages
Logger (org.slf4j.Logger): 19 usages
LoggerFactory (org.slf4j.LoggerFactory): 19 usages
MoreExecutors (com.google.common.util.concurrent.MoreExecutors): 18 usages
IOException (java.io.IOException): 18 usages
Set (java.util.Set): 17 usages
Collections (java.util.Collections): 16 usages
Map (java.util.Map): 16 usages
TimeUnit (java.util.concurrent.TimeUnit): 16 usages
Optional (com.google.common.base.Optional): 15 usages
ExecutionException (java.util.concurrent.ExecutionException): 15 usages
Nonnull (javax.annotation.Nonnull): 15 usages
Nullable (javax.annotation.Nullable): 15 usages
Collection (java.util.Collection): 13 usages
Future (java.util.concurrent.Future): 13 usages
Collectors (java.util.stream.Collectors): 12 usages