use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
the class CachingQueryRunner method run.
@Override
public Sequence<T> run(Query<T> query, Map<String, Object> responseContext) {
    final CacheStrategy strategy = toolChest.getCacheStrategy(query);
    final boolean populateCache = CacheUtil.populateCacheOnDataNodes(query, strategy, cacheConfig);
    final boolean useCache = CacheUtil.useCacheOnDataNodes(query, strategy, cacheConfig);
    final Cache.NamedKey key;
    if (strategy != null && (useCache || populateCache)) {
        key = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor, strategy.computeCacheKey(query));
    } else {
        key = null;
    }
    if (useCache) {
        final Function cacheFn = strategy.pullFromCache();
        final byte[] cachedResult = cache.get(key);
        if (cachedResult != null) {
            final TypeReference cacheObjectClazz = strategy.getCacheObjectClazz();
            // Cache hit: lazily deserialize the cached bytes back into result objects.
            return Sequences.map(new BaseSequence<>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {

                @Override
                public Iterator<T> make() {
                    try {
                        if (cachedResult.length == 0) {
                            return Iterators.emptyIterator();
                        }
                        return mapper.readValues(mapper.getFactory().createParser(cachedResult), cacheObjectClazz);
                    } catch (IOException e) {
                        throw Throwables.propagate(e);
                    }
                }

                @Override
                public void cleanup(Iterator<T> iterFromMake) {
                }
            }), cacheFn);
        }
    }
    final Collection<ListenableFuture<?>> cacheFutures = Collections.synchronizedList(Lists.<ListenableFuture<?>>newLinkedList());
    if (populateCache) {
        final Function cacheFn = strategy.prepareForCache();
        // Cache miss: run the base query, serialize each result for the cache on a
        // background executor, and commit the cache entry once all tasks complete.
        return Sequences.withEffect(Sequences.map(base.run(query, responseContext), new Function<T, T>() {

            @Override
            public T apply(final T input) {
                final SettableFuture<Object> future = SettableFuture.create();
                cacheFutures.add(future);
                backgroundExecutorService.submit(new Runnable() {

                    @Override
                    public void run() {
                        try {
                            future.set(cacheFn.apply(input));
                        } catch (Exception e) {
                            // On failure, set the exception on the future so the
                            // allAsList barrier below fails and caching is abandoned.
                            future.setException(e);
                        }
                    }
                });
                return input;
            }
        }), new Runnable() {

            @Override
            public void run() {
                try {
                    CacheUtil.populate(cache, mapper, key, Futures.allAsList(cacheFutures).get());
                } catch (Exception e) {
                    log.error(e, "Error while getting future for cache task");
                    throw Throwables.propagate(e);
                }
            }
        }, backgroundExecutorService);
    } else {
        return base.run(query, responseContext);
    }
}
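The pattern worth isolating here: each result that streams past schedules a background serialization task through a SettableFuture, and a Futures.allAsList barrier holds the final cache write until every task has finished (or any has failed). Below is a minimal, self-contained sketch of that pattern, independent of Druid; the class name and the squaring "work" are illustrative stand-ins.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class BackgroundBarrier {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        // One SettableFuture per element, completed by a background task.
        final Collection<ListenableFuture<?>> futures =
            Collections.synchronizedList(new ArrayList<ListenableFuture<?>>());

        for (int i = 0; i < 10; i++) {
            final int element = i;
            final SettableFuture<Object> future = SettableFuture.create();
            futures.add(future);
            executor.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        future.set(element * element); // stand-in for cacheFn.apply(input)
                    } catch (Exception e) {
                        future.setException(e); // fail the barrier if any task fails
                    }
                }
            });
        }

        // Barrier: blocks until every per-element future has completed,
        // and throws if any of them failed.
        List<Object> results = Futures.allAsList(futures).get();
        System.out.println("All " + results.size() + " background writes finished");
        executor.shutdown();
    }
}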
use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
the class FiniteAppenderatorDriver method publishAll.
/**
* Push and publish all segments to the metadata store.
*
* @param publisher segment publisher
* @param wrappedCommitter wrapped committer (from wrapCommitter)
*
* @return published segments and metadata, or null if segments could not be published due to transaction failure
* with commit metadata.
*/
private SegmentsAndMetadata publishAll(final TransactionalSegmentPublisher publisher, final Committer wrappedCommitter) throws InterruptedException {
    final List<SegmentIdentifier> theSegments = ImmutableList.copyOf(appenderator.getSegments());
    long nTry = 0;
    while (true) {
        try {
            log.info("Pushing segments: [%s]", Joiner.on(", ").join(theSegments));
            final SegmentsAndMetadata segmentsAndMetadata = appenderator.push(theSegments, wrappedCommitter).get();
            // Sanity check: the push must return exactly the segments we asked for.
            if (!segmentsToIdentifiers(segmentsAndMetadata.getSegments()).equals(Sets.newHashSet(theSegments))) {
                throw new ISE("WTF?! Pushed different segments than requested. Pushed[%s], requested[%s].", Joiner.on(", ").join(identifiersToStrings(segmentsToIdentifiers(segmentsAndMetadata.getSegments()))), Joiner.on(", ").join(identifiersToStrings(theSegments)));
            }
            log.info("Publishing segments with commitMetadata[%s]: [%s]", segmentsAndMetadata.getCommitMetadata(), Joiner.on(", ").join(segmentsAndMetadata.getSegments()));
            if (segmentsAndMetadata.getSegments().isEmpty()) {
                log.info("Nothing to publish, skipping publish step.");
            } else {
                final boolean published = publisher.publishSegments(ImmutableSet.copyOf(segmentsAndMetadata.getSegments()), ((FiniteAppenderatorDriverMetadata) segmentsAndMetadata.getCommitMetadata()).getCallerMetadata());
                if (published) {
                    log.info("Published segments, awaiting handoff.");
                } else {
                    log.info("Transaction failure while publishing segments, checking if someone else beat us to it.");
                    if (usedSegmentChecker.findUsedSegments(segmentsToIdentifiers(segmentsAndMetadata.getSegments())).equals(Sets.newHashSet(segmentsAndMetadata.getSegments()))) {
                        log.info("Our segments really do exist, awaiting handoff.");
                    } else {
                        log.warn("Our segments don't exist, giving up.");
                        return null;
                    }
                }
            }
            for (final DataSegment dataSegment : segmentsAndMetadata.getSegments()) {
                handoffNotifier.registerSegmentHandoffCallback(new SegmentDescriptor(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment.getShardSpec().getPartitionNum()), MoreExecutors.sameThreadExecutor(), new Runnable() {

                    @Override
                    public void run() {
                        final SegmentIdentifier identifier = SegmentIdentifier.fromDataSegment(dataSegment);
                        log.info("Segment[%s] successfully handed off, dropping.", identifier);
                        metrics.incrementHandOffCount();
                        final ListenableFuture<?> dropFuture = appenderator.drop(identifier);
                        Futures.addCallback(dropFuture, new FutureCallback<Object>() {

                            @Override
                            public void onSuccess(Object result) {
                                synchronized (handoffMonitor) {
                                    handoffMonitor.notifyAll();
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                log.warn(e, "Failed to drop segment[%s]?!", identifier);
                                synchronized (handoffMonitor) {
                                    handoffMonitor.notifyAll();
                                }
                            }
                        });
                    }
                });
            }
            return segmentsAndMetadata;
        } catch (InterruptedException e) {
            throw e;
        } catch (Exception e) {
            final long sleepMillis = computeNextRetrySleep(++nTry);
            log.warn(e, "Failed publishAll (try %d), retrying in %,dms.", nTry, sleepMillis);
            Thread.sleep(sleepMillis);
        }
    }
}
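The handoff bookkeeping boils down to one ListenableFuture idiom: attach a FutureCallback to the drop future and notify a shared monitor on either outcome, so a waiter can wake up and re-check state. Below is a hedged, runnable sketch of just that piece; the SettableFuture stands in for the future returned by appenderator.drop(identifier).

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;

public class HandoffNotify {
    public static void main(String[] args) throws InterruptedException {
        final Object handoffMonitor = new Object();
        final boolean[] done = { false };

        // Stand-in for the ListenableFuture returned by appenderator.drop(identifier).
        final SettableFuture<Object> dropFuture = SettableFuture.create();

        Futures.addCallback(dropFuture, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                synchronized (handoffMonitor) {
                    done[0] = true;
                    handoffMonitor.notifyAll(); // wake waiters: drop finished
                }
            }

            @Override
            public void onFailure(Throwable t) {
                synchronized (handoffMonitor) {
                    done[0] = true; // wake waiters even on failure so they can re-check
                    handoffMonitor.notifyAll();
                }
            }
        });

        // Simulate the drop completing on another thread.
        new Thread(new Runnable() {
            @Override
            public void run() {
                dropFuture.set("dropped");
            }
        }).start();

        synchronized (handoffMonitor) {
            while (!done[0]) {
                handoffMonitor.wait();
            }
        }
        System.out.println("Handoff observed, safe to continue.");
    }
}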
use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
the class QueryManager method cancelQuery.
public boolean cancelQuery(String id) {
    queryDatasources.removeAll(id);
    Set<ListenableFuture> futures = queries.removeAll(id);
    boolean success = true;
    for (ListenableFuture future : futures) {
        success = success && future.cancel(true);
    }
    return success;
}
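Cancellation here leans entirely on the Future contract: cancel(true) asks each in-flight future to interrupt its worker, and the results are ANDed so the method reports false if any future refused (for example, because it had already completed). Below is a small sketch of the bookkeeping; the synchronized multimap is an assumption about how a queries field like this might be built, not the actual Druid field.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimaps;
import com.google.common.collect.SetMultimap;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

import java.util.Set;

public class QueryTracker {
    // Assumption: in-flight futures are tracked per query id in a synchronized multimap.
    private final SetMultimap<String, ListenableFuture<?>> queries =
        Multimaps.synchronizedSetMultimap(HashMultimap.<String, ListenableFuture<?>>create());

    public void register(String id, ListenableFuture<?> future) {
        queries.put(id, future);
    }

    /** Cancel every in-flight future for this query id; true only if all accepted. */
    public boolean cancelQuery(String id) {
        Set<ListenableFuture<?>> futures = queries.removeAll(id);
        boolean success = true;
        for (ListenableFuture<?> future : futures) {
            success = success && future.cancel(true); // true = interrupt if running
        }
        return success;
    }

    public static void main(String[] args) {
        QueryTracker tracker = new QueryTracker();
        tracker.register("q1", SettableFuture.create());
        tracker.register("q1", SettableFuture.create());
        System.out.println("cancelled q1: " + tracker.cancelQuery("q1")); // true
    }
}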
use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
the class LookupCoordinatorManager method updateNodes.
void updateNodes(Collection<URL> urls, final Map<String, Map<String, Object>> knownLookups) throws IOException, InterruptedException, ExecutionException {
    if (knownLookups == null) {
        LOG.debug("No config for lookups found");
        return;
    }
    if (knownLookups.isEmpty()) {
        LOG.debug("No known lookups. Skipping update");
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Updating %d lookups on %d nodes", knownLookups.size(), urls.size());
    }
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    updateAllOnHost(url, knownLookups);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    LOG.warn("Update on [%s] interrupted", url);
                    throw Throwables.propagate(e);
                } catch (IOException | ExecutionException e) {
                    // Don't raise as ExecutionException. Just log and continue
                    LOG.makeAlert(e, "Error submitting to [%s]", url).emit();
                }
            }
        }));
    }
    final ListenableFuture allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        LOG.warn("Timeout in updating hosts! Attempting to cancel");
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
    }
}
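The fan-out shape here is reusable on its own: submit one task per node to a ListeningExecutorService, join on Futures.allAsList with a deadline, and cancel(true) on timeout so stragglers receive an interrupt. Below is a self-contained sketch under those assumptions; the host list, the printed "update", and the 10-second timeout are illustrative stand-ins.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class FanOutUpdate {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executorService =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        List<String> hosts = Arrays.asList("host1", "host2", "host3");

        // One task per host, each tracked by a ListenableFuture.
        final List<ListenableFuture<?>> futures = new ArrayList<>(hosts.size());
        for (final String host : hosts) {
            futures.add(executorService.submit(new Runnable() {
                @Override
                public void run() {
                    // Stand-in for updateAllOnHost(url, knownLookups).
                    System.out.println("updating " + host);
                }
            }));
        }

        final ListenableFuture<?> allFuture = Futures.allAsList(futures);
        try {
            // Bounded wait on the whole batch.
            allFuture.get(10, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Cancelling the composite cancels the per-host futures,
            // interrupting any update still in flight.
            allFuture.cancel(true);
        }
        executorService.shutdown();
    }
}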
use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
the class LookupCoordinatorManager method deleteAllOnTier.
void deleteAllOnTier(final String tier, final Collection<String> dropLookups) throws ExecutionException, InterruptedException, IOException {
    if (dropLookups.isEmpty()) {
        LOG.debug("Nothing to drop");
        return;
    }
    final Collection<URL> urls = getAllHostsAnnounceEndpoint(tier);
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                for (final String drop : dropLookups) {
                    final URL lookupURL;
                    try {
                        lookupURL = new URL(url.getProtocol(), url.getHost(), url.getPort(), String.format("%s/%s", url.getFile(), drop));
                    } catch (MalformedURLException e) {
                        throw new ISE(e, "Error creating url for [%s]/[%s]", url, drop);
                    }
                    try {
                        deleteOnHost(lookupURL);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        LOG.warn("Delete [%s] interrupted", lookupURL);
                        throw Throwables.propagate(e);
                    } catch (IOException | ExecutionException e) {
                        // Don't raise as ExecutionException. Just log and continue
                        LOG.makeAlert(e, "Error deleting [%s]", lookupURL).emit();
                    }
                }
            }
        }));
    }
    final ListenableFuture allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
        throw new ExecutionException("Timeout in updating hosts! Attempting to cancel", e);
    }
}
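Note the deliberate contrast with updateNodes above: both methods fan out per-host tasks, join on Futures.allAsList with the same bounded get, and cancel the composite on timeout, but deleteAllOnTier rethrows the timeout as an ExecutionException so callers observe the failure, whereas updateNodes only logs it. In both cases, cancelling the future returned by Futures.allAsList cancels the underlying per-host futures, which is what delivers the interrupts the comments refer to.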