Use of com.google.common.util.concurrent.ListenableFuture in project buck by facebook.
The class DistBuildFileHashes, method ruleKeyComputation:
private static ListenableFuture<ImmutableMap<BuildRule, RuleKey>> ruleKeyComputation(
    ActionGraph actionGraph,
    final LoadingCache<ProjectFilesystem, DefaultRuleKeyFactory> ruleKeyFactories,
    ListeningExecutorService executorService) {
  List<ListenableFuture<Map.Entry<BuildRule, RuleKey>>> ruleKeyEntries = new ArrayList<>();
  for (final BuildRule rule : actionGraph.getNodes()) {
    ruleKeyEntries.add(
        executorService.submit(
            () -> Maps.immutableEntry(
                rule, ruleKeyFactories.get(rule.getProjectFilesystem()).build(rule))));
  }
  ListenableFuture<List<Map.Entry<BuildRule, RuleKey>>> ruleKeyComputation =
      Futures.allAsList(ruleKeyEntries);
  return Futures.transform(
      ruleKeyComputation,
      new Function<List<Map.Entry<BuildRule, RuleKey>>, ImmutableMap<BuildRule, RuleKey>>() {
        @Override
        public ImmutableMap<BuildRule, RuleKey> apply(List<Map.Entry<BuildRule, RuleKey>> input) {
          return ImmutableMap.copyOf(input);
        }
      },
      executorService);
}
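The method above fans work out across an executor and fans the results back in: one future per rule, gathered with Futures.allAsList, then collapsed into a map with Futures.transform. Below is a minimal, self-contained sketch of that same shape; the names (keyFor, keysFor, the string/length computation) are illustrative only, not from Buck.

// Hypothetical fan-out/fan-in sketch: submit one task per input on a
// ListeningExecutorService, gather the per-item futures with Futures.allAsList,
// and collapse the combined future into an ImmutableMap.
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class FanOutFanInSketch {

  // Stand-in for an expensive per-item computation (a rule key in the snippet above).
  static int keyFor(String item) {
    return item.length();
  }

  static ListenableFuture<ImmutableMap<String, Integer>> keysFor(
      List<String> items, ListeningExecutorService executorService) {
    // Fan out: one future per item.
    List<ListenableFuture<Map.Entry<String, Integer>>> entries = new ArrayList<>();
    for (String item : items) {
      entries.add(executorService.submit(() -> Maps.immutableEntry(item, keyFor(item))));
    }
    // Fan in: wait for all entries, then build a single immutable map.
    return Futures.transform(
        Futures.allAsList(entries),
        entryList -> ImmutableMap.copyOf(entryList),
        executorService);
  }

  public static void main(String[] args) throws ExecutionException, InterruptedException {
    ListeningExecutorService executorService =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
    // Prints {a=1, bb=2, ccc=3}.
    System.out.println(keysFor(ImmutableList.of("a", "bb", "ccc"), executorService).get());
    executorService.shutdown();
  }
}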
Use of com.google.common.util.concurrent.ListenableFuture in project buck by facebook.
The class CachingBuildEngine, method processBuildRule:
private ListenableFuture<BuildResult> processBuildRule(
    final BuildRule rule,
    final BuildEngineBuildContext buildContext,
    final ExecutionContext executionContext,
    final OnDiskBuildInfo onDiskBuildInfo,
    final BuildInfoRecorder buildInfoRecorder,
    final BuildableContext buildableContext,
    final ConcurrentLinkedQueue<ListenableFuture<Void>> asyncCallbacks) {
  // If we've already seen a failure, exit early.
  if (!buildContext.isKeepGoing() && firstFailure != null) {
    return Futures.immediateFuture(BuildResult.canceled(rule, firstFailure));
  }
  final RuleKeyFactories ruleKeyFactory = ruleKeyFactories.apply(rule.getProjectFilesystem());
  try (BuildRuleEvent.Scope scope =
      BuildRuleEvent.resumeSuspendScope(
          buildContext.getEventBus(),
          rule,
          buildRuleDurationTracker,
          ruleKeyFactory.getDefaultRuleKeyFactory())) {
    // 1. Check if it's already built.
    Optional<RuleKey> cachedRuleKey = onDiskBuildInfo.getRuleKey(BuildInfo.MetadataKey.RULE_KEY);
    final RuleKey defaultRuleKey = ruleKeyFactory.getDefaultRuleKeyFactory().build(rule);
    if (defaultRuleKey.equals(cachedRuleKey.orElse(null))) {
      return Futures.transform(
          markRuleAsUsed(rule, buildContext.getEventBus()),
          Functions.constant(
              BuildResult.success(
                  rule,
                  BuildRuleSuccessType.MATCHING_RULE_KEY,
                  CacheResult.localKeyUnchangedHit())));
    }
    // 2. Rule key cache lookup.
    ListenableFuture<CacheResult> rulekeyCacheResult =
        cacheActivityService.submit(
            () -> {
              CacheResult cacheResult =
                  tryToFetchArtifactFromBuildCacheAndOverlayOnTopOfProjectFilesystem(
                      rule,
                      defaultRuleKey,
                      buildContext.getArtifactCache(),
                      // TODO(shs96c): This should be shared between all tests, not one per cell
                      rule.getProjectFilesystem(),
                      buildContext);
              if (cacheResult.getType().isSuccess()) {
                fillMissingBuildMetadataFromCache(
                    cacheResult,
                    buildInfoRecorder,
                    BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY,
                    BuildInfo.MetadataKey.DEP_FILE_RULE_KEY,
                    BuildInfo.MetadataKey.DEP_FILE);
              }
              return cacheResult;
            },
            CACHE_CHECK_RESOURCE_AMOUNTS);
    return Futures.transformAsync(
        rulekeyCacheResult,
        ruleAsyncFunction(
            rule,
            buildContext.getEventBus(),
            (cacheResult) ->
                handleRuleKeyCacheResult(
                    rule,
                    buildContext,
                    executionContext,
                    onDiskBuildInfo,
                    buildInfoRecorder,
                    buildableContext,
                    asyncCallbacks,
                    ruleKeyFactory,
                    cacheResult)),
        serviceByAdjustingDefaultWeightsTo(SCHEDULING_MORE_WORK_RESOURCE_AMOUNTS));
  }
}
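The cache-lookup step above is a two-stage chain: a ListenableFuture<CacheResult> produced by the cache service is fed into Futures.transformAsync, whose function itself returns another future. A minimal, hypothetical sketch of that shape follows; CacheProbeSketch, fetchFromCache and buildLocally are illustrative names, not Buck APIs.

// Hypothetical sketch of transformAsync chaining: an async cache probe whose result
// decides whether to finish immediately or to continue with more asynchronous work.
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class CacheProbeSketch {

  private final ListeningExecutorService executorService =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

  ListenableFuture<String> fetchFromCache(String key) {
    // Pretend lookup; a real implementation would hit an artifact cache.
    return executorService.submit(() -> key.length() % 2 == 0 ? "HIT" : "MISS");
  }

  ListenableFuture<String> buildLocally(String key) {
    return executorService.submit(() -> "built " + key);
  }

  ListenableFuture<String> buildOrFetch(String key) {
    ListenableFuture<String> cacheResult = fetchFromCache(key);
    // Chain the follow-up work onto the cache result without blocking a thread.
    return Futures.transformAsync(
        cacheResult,
        result -> "HIT".equals(result)
            ? Futures.immediateFuture("fetched " + key)
            : buildLocally(key),
        executorService);
  }
}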
Use of com.google.common.util.concurrent.ListenableFuture in project buck by facebook.
The class ConvertingPipeline, method getAllNodesJob:
@Override
public ListenableFuture<ImmutableSet<T>> getAllNodesJob(final Cell cell, final Path buildFile)
    throws BuildTargetException {
  // TODO(tophyr): this hits the chained pipeline before hitting the cache
  ListenableFuture<List<T>> allNodesListJob =
      Futures.transformAsync(
          getItemsToConvert(cell, buildFile),
          allToConvert -> {
            if (shuttingDown()) {
              return Futures.immediateCancelledFuture();
            }
            ImmutableList.Builder<ListenableFuture<T>> allNodeJobs = ImmutableList.builder();
            for (final F from : allToConvert) {
              if (isValid(from)) {
                final BuildTarget target = getBuildTarget(cell.getRoot(), buildFile, from);
                allNodeJobs.add(
                    cache.getJobWithCacheLookup(
                        cell,
                        target,
                        () -> {
                          if (shuttingDown()) {
                            return Futures.immediateCancelledFuture();
                          }
                          return dispatchComputeNode(cell, target, from);
                        }));
              }
            }
            return Futures.allAsList(allNodeJobs.build());
          },
          executorService);
  return Futures.transform(
      allNodesListJob, (Function<List<T>, ImmutableSet<T>>) ImmutableSet::copyOf, executorService);
}
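Both stages above bail out by returning Futures.immediateCancelledFuture() when the pipeline is shutting down, which resolves the whole chain as cancelled without scheduling further work. A small hypothetical sketch of that short-circuit follows; shuttingDown, loadIds and loadRecord are illustrative names, not from Buck.

// Hypothetical sketch of a shutdown-aware pipeline: each asynchronous stage checks a
// volatile flag and returns an immediately-cancelled future to stop the chain early.
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

public class ShutdownAwarePipeline {

  private final ListeningExecutorService executorService =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
  private volatile boolean shuttingDown = false;

  ListenableFuture<List<String>> loadIds() {
    return executorService.submit(() -> ImmutableList.of("a", "b", "c"));
  }

  ListenableFuture<String> loadRecord(String id) {
    return executorService.submit(() -> "record:" + id);
  }

  ListenableFuture<List<String>> loadAll() {
    return Futures.transformAsync(
        loadIds(),
        ids -> {
          if (shuttingDown) {
            // Short-circuit: the returned future (and the chain built on it) is cancelled.
            return Futures.immediateCancelledFuture();
          }
          List<ListenableFuture<String>> jobs = new ArrayList<>();
          for (String id : ids) {
            jobs.add(loadRecord(id));
          }
          return Futures.allAsList(jobs);
        },
        executorService);
  }

  void shutDown() {
    shuttingDown = true;
    executorService.shutdown();
  }
}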
Use of com.google.common.util.concurrent.ListenableFuture in project buck by facebook.
The class OfflineScribeLogger, method sendStoredLogs:
private synchronized void sendStoredLogs() {
  ImmutableSortedSet<Path> logsPaths;
  try {
    if (!filesystem.isDirectory(logDir)) {
      // No logs to submit to Scribe.
      return;
    }
    logsPaths = filesystem.getMtimeSortedMatchingDirectoryContents(logDir, LOGFILE_PATTERN);
  } catch (Exception e) {
    LOG.error(e, "Fetching stored logs list failed.");
    return;
  }
  long totalBytesToSend = 0;
  for (Path logPath : logsPaths) {
    // Sending should cease if storing has been initiated or closing has started.
    if (startedStoring || startedClosing) {
      break;
    }
    // Get iterator.
    Iterator<ScribeData> it;
    File logFile;
    try {
      logFile = logPath.toFile();
      totalBytesToSend += logFile.length();
      if (totalBytesToSend > maxScribeOfflineLogsBytes) {
        LOG.warn("Total size of offline logs exceeds the limit. Ceasing to send them to Scribe.");
        return;
      }
      InputStream logFileStream;
      try {
        logFileStream = new BufferedInputStream(new FileInputStream(logFile), BUFFER_SIZE);
      } catch (FileNotFoundException e) {
        LOG.info(
            e,
            "There was a problem getting stream for logfile: %s. Likely logfile was resent and "
                + "deleted by a concurrent Buck command.",
            logPath);
        continue;
      }
      it = new ObjectMapper().readValues(new JsonFactory().createParser(logFileStream), ScribeData.class);
    } catch (Exception e) {
      LOG.error(e, "Failed to initiate reading from: %s. File may be corrupted.", logPath);
      continue;
    }
    // Read and submit.
    int scribeLinesInFile = 0;
    List<ListenableFuture<Void>> logFutures = new LinkedList<>();
    Map<String, CategoryData> logReadData = new HashMap<>();
    try {
      boolean interrupted = false;
      // Read data and build per-category clusters - dispatch if needed.
      while (it.hasNext()) {
        if (startedStoring || startedClosing) {
          interrupted = true;
          break;
        }
        ScribeData newData = it.next();
        // Prepare map entry for new data (dispatch old data if needed).
        if (!logReadData.containsKey(newData.getCategory())) {
          logReadData.put(newData.getCategory(), new CategoryData());
        }
        CategoryData categoryData = logReadData.get(newData.getCategory());
        if (categoryData.getLinesBytes() > CLUSTER_DISPATCH_SIZE) {
          logFutures.add(scribeLogger.log(newData.getCategory(), categoryData.getLines()));
          categoryData.clearData();
        }
        // Add new data to the cluster for the category.
        for (String line : newData.getLines()) {
          categoryData.addLine(line);
          scribeLinesInFile++;
        }
      }
      // Send remaining data from per-category clusters.
      if (!interrupted) {
        for (Map.Entry<String, CategoryData> logReadDataEntry : logReadData.entrySet()) {
          if (startedStoring || startedClosing) {
            interrupted = true;
            break;
          }
          List<String> categoryLines = logReadDataEntry.getValue().getLines();
          if (categoryLines.size() > 0) {
            logFutures.add(scribeLogger.log(logReadDataEntry.getKey(), categoryLines));
          }
        }
      }
      if (interrupted) {
        LOG.info("Stopped while sending from offline log (it will not be removed): %s.", logPath);
        logFutures.clear();
        break;
      }
    } catch (Exception e) {
      LOG.error(
          e,
          "Error while reading offline log from: %s. This log will not be removed now. If this "
              + "error reappears in further runs, the file may be corrupted and should be deleted.",
          logPath);
      logFutures.clear();
      continue;
    } finally {
      logReadData.clear();
    }
    // Confirm data was successfully sent and remove logfile.
    try {
      Futures.allAsList(logFutures).get(LOG_TIMEOUT, LOG_TIMEOUT_UNIT);
      totalBytesResent.inc(logFile.length());
      totalLinesResent.inc(scribeLinesInFile);
      logfilesResent.inc();
      try {
        filesystem.deleteFileAtPathIfExists(logPath);
      } catch (Exception e) {
        LOG.error(e, "Failed to remove successfully resent offline log. Stopping sending.");
        break;
      }
    } catch (Exception e) {
      LOG.info("Failed to send all data from offline log: %s. Log will not be removed.", logPath);
      // Do not attempt to send data from further logfiles - likely there are network issues.
      break;
    } finally {
      logFutures.clear();
    }
  }
}
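The send path above collects one ListenableFuture<Void> per dispatched cluster and only deletes the logfile after Futures.allAsList(logFutures).get(LOG_TIMEOUT, LOG_TIMEOUT_UNIT) returns, i.e. after every upload succeeded within the timeout. A minimal hypothetical sketch of that bounded wait follows; uploadOne and uploadAll are illustrative names, not from Buck.

// Hypothetical sketch of a bounded wait on a batch of futures: combine the per-cluster
// futures with allAsList and block with a timeout; any failure or timeout surfaces as an
// exception and the caller keeps the file for a later retry.
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedWaitSketch {

  private static final ListeningExecutorService executorService =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

  static ListenableFuture<Void> uploadOne(String payload) {
    return executorService.submit(() -> {
      // Pretend upload of one cluster of log lines.
      return null;
    });
  }

  static boolean uploadAll(List<String> payloads) {
    List<ListenableFuture<Void>> uploads = new ArrayList<>();
    for (String payload : payloads) {
      uploads.add(uploadOne(payload));
    }
    try {
      // Succeeds only if every upload succeeded within the timeout.
      Futures.allAsList(uploads).get(30, TimeUnit.SECONDS);
      return true;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return false;
    } catch (ExecutionException | TimeoutException e) {
      return false;
    }
  }
}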
Use of com.google.common.util.concurrent.ListenableFuture in project jersey by jersey.
The class ListenableFutureAgentResource, method recommended:
private ListenableFuture<AgentResponse> recommended(final AgentResponse response) {
  destination.register(RxListenableFutureInvokerProvider.class);
  // Get a list of recommended destinations ...
  final ListenableFuture<List<Destination>> destinations =
      destination.path("recommended")
          .request()
          .header("Rx-User", "Guava")
          .rx(RxListenableFutureInvoker.class)
          .get(new GenericType<List<Destination>>() {});
  // ... transform them to Recommendation instances ...
  final ListenableFuture<List<Recommendation>> recommendations =
      Futures.transform(
          destinations,
          (AsyncFunction<List<Destination>, List<Recommendation>>) destinationList -> {
            final List<Recommendation> recommendationList =
                Lists.newArrayList(
                    Lists.transform(
                        destinationList,
                        destination -> new Recommendation(destination.getDestination(), null, 0)));
            return Futures.immediateFuture(recommendationList);
          });
  // ... add forecasts and calculations ...
  final ListenableFuture<List<List<Recommendation>>> filledRecommendations =
      Futures.successfulAsList(
          Arrays.asList(
              // Add Forecasts to Recommendations.
              forecasts(recommendations),
              // Add Calculations to Recommendations.
              calculations(recommendations)));
  // ... and transform the list into an agent response with filled recommendations.
  return Futures.transform(
      filledRecommendations,
      (AsyncFunction<List<List<Recommendation>>, AgentResponse>) input -> {
        response.setRecommended(input.get(0));
        return Futures.immediateFuture(response);
      });
}
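One API note: the AsyncFunction-typed overload of Futures.transform used here comes from older Guava releases; newer releases expose the async variant as Futures.transformAsync, and later versions also require an explicit executor. Assuming a newer Guava and that MoreExecutors.directExecutor() is acceptable for this lightweight mapping, the first transformation could be sketched as:

// Hedged sketch against newer Guava: same chain, written with Futures.transformAsync and an
// explicit executor; the surrounding types (Destination, Recommendation) are from the resource above.
final ListenableFuture<List<Recommendation>> recommendations =
    Futures.transformAsync(
        destinations,
        destinationList -> {
          final List<Recommendation> recommendationList =
              Lists.newArrayList(
                  Lists.transform(
                      destinationList,
                      destination -> new Recommendation(destination.getDestination(), null, 0)));
          return Futures.immediateFuture(recommendationList);
        },
        MoreExecutors.directExecutor());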