use of com.google.common.util.concurrent.Futures in project intellij by bazelbuild.
the class DirectoryStructure method walkDirectoryStructure.
private static ListenableFuture<PathStructurePair> walkDirectoryStructure(
    WorkspaceRoot workspaceRoot,
    Set<WorkspacePath> excludeDirectories,
    FileOperationProvider fileOperationProvider,
    ListeningExecutorService executorService,
    WorkspacePath workspacePath) {
  if (excludeDirectories.contains(workspacePath)) {
    return Futures.immediateFuture(null);
  }
  File file = workspaceRoot.fileForPath(workspacePath);
  if (!fileOperationProvider.isDirectory(file)) {
    return Futures.immediateFuture(null);
  }
  ListenableFuture<File[]> childrenFuture =
      executorService.submit(() -> fileOperationProvider.listFiles(file));
  return Futures.transformAsync(
      childrenFuture,
      children -> {
        if (children == null) {
          return Futures.immediateFuture(null);
        }
        List<ListenableFuture<PathStructurePair>> futures =
            Lists.newArrayListWithExpectedSize(children.length);
        for (File child : children) {
          WorkspacePath childWorkspacePath;
          try {
            childWorkspacePath = workspaceRoot.workspacePathFor(child);
          } catch (IllegalArgumentException e) {
            // Stop at directories with unhandled characters.
            continue;
          }
          futures.add(
              walkDirectoryStructure(
                  workspaceRoot,
                  excludeDirectories,
                  fileOperationProvider,
                  executorService,
                  childWorkspacePath));
        }
        return Futures.transform(
            Futures.allAsList(futures),
            (Function<List<PathStructurePair>, PathStructurePair>) pairs -> {
              Builder<WorkspacePath, DirectoryStructure> result = ImmutableMap.builder();
              for (PathStructurePair pair : pairs) {
                if (pair != null) {
                  result.put(pair.path, pair.directoryStructure);
                }
              }
              return new PathStructurePair(workspacePath, new DirectoryStructure(result.build()));
            },
            executorService);
      },
      executorService);
}
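The same recursive fan-out pattern can be tried in isolation. Below is a minimal, self-contained sketch (not from the plugin) that counts files with plain java.io.File in place of Blaze's WorkspaceRoot and FileOperationProvider; the names DirectoryWalkSketch and countFilesAsync are illustrative, but the transformAsync/allAsList/transform chain mirrors walkDirectoryStructure above.

import com.google.common.util.concurrent.*;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

public class DirectoryWalkSketch {
  // Recursively counts files under `dir`, one listFiles() call per executor task.
  static ListenableFuture<Integer> countFilesAsync(File dir, ListeningExecutorService executor) {
    if (!dir.isDirectory()) {
      return Futures.immediateFuture(1); // a plain file counts as one
    }
    ListenableFuture<File[]> childrenFuture = executor.submit(dir::listFiles);
    return Futures.transformAsync(childrenFuture, children -> {
      if (children == null) {
        return Futures.immediateFuture(0); // listFiles() returns null on I/O error
      }
      List<ListenableFuture<Integer>> futures = new ArrayList<>(children.length);
      for (File child : children) {
        futures.add(countFilesAsync(child, executor));
      }
      // allAsList plays the same join role as in walkDirectoryStructure.
      return Futures.transform(
          Futures.allAsList(futures),
          counts -> counts.stream().mapToInt(Integer::intValue).sum(),
          executor);
    }, executor);
  }

  public static void main(String[] args) throws Exception {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
    System.out.println(countFilesAsync(new File("."), executor).get());
    executor.shutdown();
  }
}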
use of com.google.common.util.concurrent.Futures in project presto by prestodb.
the class HivePageSink method doFinish.
private ListenableFuture<Collection<Slice>> doFinish() {
    ImmutableList.Builder<PartitionUpdate> partitionUpdatesBuilder = ImmutableList.builder();
    List<Callable<Object>> verificationTasks = new ArrayList<>();
    for (HiveWriter writer : writers) {
        writer.commit();
        partitionUpdatesBuilder.add(writer.getPartitionUpdate());
        writer.getVerificationTask().map(Executors::callable).ifPresent(verificationTasks::add);
    }
    List<PartitionUpdate> partitionUpdates = partitionUpdatesBuilder.build();
    boolean optimizedPartitionUpdateSerializationEnabled = isOptimizedPartitionUpdateSerializationEnabled(session);
    if (optimizedPartitionUpdateSerializationEnabled) {
        // Merge multiple partition updates for a single partition into one.
        // Multiple partition updates for a single partition are produced when writing into a bucketed table.
        // Merged partition updates will contain multiple items in the fileWriteInfos list (one per bucket).
        // This optimization should be enabled only together with the optimized serialization (compression + binary encoding).
        // Since serialized fragments will be transmitted as Presto pages, serializing a merged partition update
        // to JSON without compression is unsafe, as it may exceed the maximum page size limit.
        partitionUpdates = mergePartitionUpdates(partitionUpdates);
    }
    ImmutableList.Builder<Slice> serializedPartitionUpdatesBuilder = ImmutableList.builder();
    for (PartitionUpdate partitionUpdate : partitionUpdates) {
        byte[] serializedBytes;
        if (optimizedPartitionUpdateSerializationEnabled) {
            serializedBytes = serializeZstdCompressed(partitionUpdateSmileCodec, partitionUpdate);
        } else {
            serializedBytes = partitionUpdateCodec.toBytes(partitionUpdate);
        }
        serializedPartitionUpdatesBuilder.add(wrappedBuffer(serializedBytes));
    }
    List<Slice> serializedPartitionUpdates = serializedPartitionUpdatesBuilder.build();
    writtenBytes = writers.stream().mapToLong(HiveWriter::getWrittenBytes).sum();
    validationCpuNanos = writers.stream().mapToLong(HiveWriter::getValidationCpuNanos).sum();
    if (waitForFileRenaming && verificationTasks.isEmpty()) {
        // Use CopyOnWriteArrayList to prevent a race condition when callbacks add partition updates to this list.
        List<Slice> partitionUpdatesWithRenamedFileNames = new CopyOnWriteArrayList<>();
        List<ListenableFuture<?>> futures = new ArrayList<>();
        for (int i = 0; i < writers.size(); i++) {
            int writerIndex = i;
            ListenableFuture<?> fileNameFuture = toListenableFuture(hiveMetadataUpdater.getMetadataResult(writerIndex));
            SettableFuture renamingFuture = SettableFuture.create();
            futures.add(renamingFuture);
            addSuccessCallback(fileNameFuture, obj -> renameFiles((String) obj, writerIndex, renamingFuture, partitionUpdatesWithRenamedFileNames));
        }
        return Futures.transform(Futures.allAsList(futures), input -> partitionUpdatesWithRenamedFileNames, directExecutor());
    }
    if (verificationTasks.isEmpty()) {
        return Futures.immediateFuture(serializedPartitionUpdates);
    }
    try {
        List<ListenableFuture<?>> futures = writeVerificationExecutor.invokeAll(verificationTasks).stream()
                .map(future -> (ListenableFuture<?>) future)
                .collect(toList());
        return Futures.transform(Futures.allAsList(futures), input -> serializedPartitionUpdates, directExecutor());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}
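The final step, gating a precomputed result on a batch of verification tasks, can be exercised on its own. A minimal sketch, assuming only a ListeningExecutorService (whose invokeAll is specified to return ListenableFuture instances, which is what makes the cast below safe); the class name and task bodies are illustrative:

import com.google.common.util.concurrent.*;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;

public class VerifyThenReturnSketch {
  public static void main(String[] args) throws Exception {
    ListeningExecutorService verificationExecutor =
        listeningDecorator(Executors.newFixedThreadPool(2));
    // Stands in for the already-serialized partition updates.
    List<String> results = List.of("update-1", "update-2");
    List<Callable<Object>> verificationTasks = List.of(
        Executors.callable(() -> System.out.println("verify file 1")),
        Executors.callable(() -> System.out.println("verify file 2")));
    // invokeAll on a ListeningExecutorService returns ListenableFutures.
    List<ListenableFuture<?>> futures = verificationExecutor.invokeAll(verificationTasks).stream()
        .map(f -> (ListenableFuture<?>) f)
        .collect(Collectors.toList());
    // Succeed with the precomputed result only once every verification task has finished.
    ListenableFuture<List<String>> finished =
        Futures.transform(Futures.allAsList(futures), ignored -> results, directExecutor());
    System.out.println(finished.get());
    verificationExecutor.shutdown();
  }
}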
use of com.google.common.util.concurrent.Futures in project druid by druid-io.
the class ConcurrentGrouper method parallelSortAndGetGroupersIterator.
private List<CloseableIterator<Entry<KeyType>>> parallelSortAndGetGroupersIterator() {
    // The number of groupers equals the number of processing threads in the executor.
    final List<ListenableFuture<CloseableIterator<Entry<KeyType>>>> futures = groupers.stream()
        .map(grouper -> executor.submit(
            new AbstractPrioritizedCallable<CloseableIterator<Entry<KeyType>>>(priority) {
                @Override
                public CloseableIterator<Entry<KeyType>> call() {
                    return grouper.iterator(true);
                }
            }))
        .collect(Collectors.toList());
    ListenableFuture<List<CloseableIterator<Entry<KeyType>>>> future = Futures.allAsList(futures);
    try {
        final long timeout = queryTimeoutAt - System.currentTimeMillis();
        return hasQueryTimeout ? future.get(timeout, TimeUnit.MILLISECONDS) : future.get();
    } catch (InterruptedException | CancellationException e) {
        GuavaUtils.cancelAll(true, future, futures);
        throw new QueryInterruptedException(e);
    } catch (TimeoutException e) {
        GuavaUtils.cancelAll(true, future, futures);
        throw new QueryTimeoutException();
    } catch (ExecutionException e) {
        GuavaUtils.cancelAll(true, future, futures);
        throw new RuntimeException(e.getCause());
    }
}
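The timeout-and-cancel discipline here can be reproduced without Druid. A minimal sketch; cancelAll below is a hypothetical stand-in for Druid's GuavaUtils.cancelAll, written to the same effect of cancelling both the combined future and every underlying future so no worker keeps running after the deadline:

import com.google.common.util.concurrent.*;
import java.util.List;
import java.util.concurrent.*;

public class BoundedJoinSketch {
  // Hypothetical stand-in for GuavaUtils.cancelAll.
  static void cancelAll(boolean mayInterrupt, Future<?> combined, List<? extends Future<?>> futures) {
    combined.cancel(mayInterrupt);
    for (Future<?> f : futures) {
      f.cancel(mayInterrupt);
    }
  }

  public static void main(String[] args) {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
    List<ListenableFuture<String>> futures = List.of(
        executor.submit(() -> "fast"),
        executor.submit(() -> { Thread.sleep(5_000); return "slow"; }));
    ListenableFuture<List<String>> all = Futures.allAsList(futures);
    try {
      // Wait at most 100 ms for every task, as ConcurrentGrouper does with its query deadline.
      System.out.println(all.get(100, TimeUnit.MILLISECONDS));
    } catch (TimeoutException e) {
      cancelAll(true, all, futures);
      System.out.println("timed out, cancelled all tasks");
    } catch (InterruptedException | ExecutionException e) {
      cancelAll(true, all, futures);
      throw new RuntimeException(e);
    } finally {
      executor.shutdown();
    }
  }
}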
use of com.google.common.util.concurrent.Futures in project buck by facebook.
the class Build method executeBuild.
/**
 * If {@code isKeepGoing} is false, then this returns a future that succeeds only if all of
 * {@code rulesToBuild} build successfully. Otherwise, this returns a future that should always
 * succeed, even if individual rules fail to build. In that case, a failed build rule is indicated
 * by a {@code null} value in the corresponding position in the iteration order of
 * {@code rulesToBuild}.
 *
 * @param targetish The targets to build. All targets in this iterable must be unique.
 */
@SuppressWarnings("PMD.EmptyCatchBlock")
public BuildExecutionResult executeBuild(Iterable<? extends BuildTarget> targetish, boolean isKeepGoing)
    throws IOException, ExecutionException, InterruptedException {
  BuildId buildId = executionContext.getBuildId();
  BuildEngineBuildContext buildContext =
      BuildEngineBuildContext.builder()
          .setBuildContext(
              BuildContext.builder()
                  .setActionGraph(actionGraph)
                  .setSourcePathResolver(new SourcePathResolver(new SourcePathRuleFinder(ruleResolver)))
                  .setJavaPackageFinder(javaPackageFinder)
                  .setEventBus(executionContext.getBuckEventBus())
                  .setAndroidPlatformTargetSupplier(executionContext.getAndroidPlatformTargetSupplier())
                  .build())
          .setClock(clock)
          .setArtifactCache(artifactCache)
          .setBuildId(buildId)
          .setObjectMapper(objectMapper)
          .putAllEnvironment(executionContext.getEnvironment())
          .setKeepGoing(isKeepGoing)
          .build();
  ImmutableSet<BuildTarget> targetsToBuild =
      StreamSupport.stream(targetish.spliterator(), false).collect(MoreCollectors.toImmutableSet());
  // It is important to use this logic to determine the set of rules to build rather than
  // build.getActionGraph().getNodesWithNoIncomingEdges() because, due to graph enhancement,
  // there could be disconnected subgraphs in the DependencyGraph that we do not want to build.
  ImmutableList<BuildRule> rulesToBuild =
      ImmutableList.copyOf(
          targetsToBuild.stream()
              .map(buildTarget -> {
                try {
                  return getRuleResolver().requireRule(buildTarget);
                } catch (NoSuchBuildTargetException e) {
                  throw new HumanReadableException("No build rule found for target %s", buildTarget);
                }
              })
              .collect(MoreCollectors.toImmutableSet()));
  // Calculate and post the number of rules that need to be built.
  int numRules = buildEngine.getNumRulesToBuild(rulesToBuild);
  getExecutionContext().getBuckEventBus().post(BuildEvent.ruleCountCalculated(targetsToBuild, numRules));
  // Set up the symlinks required when configuring the output path.
  createConfiguredBuckOutSymlinks();
  List<ListenableFuture<BuildResult>> futures =
      rulesToBuild.stream()
          .map(rule -> buildEngine.build(buildContext, executionContext, rule))
          .collect(MoreCollectors.toImmutableList());
  // Get the Future representing the build and then block until everything is built.
  ListenableFuture<List<BuildResult>> buildFuture = Futures.allAsList(futures);
  List<BuildResult> results;
  try {
    results = buildFuture.get();
    if (!isKeepGoing) {
      for (BuildResult result : results) {
        Throwable thrown = result.getFailure();
        if (thrown != null) {
          throw new ExecutionException(thrown);
        }
      }
    }
  } catch (ExecutionException | InterruptedException | RuntimeException e) {
    Throwable t = Throwables.getRootCause(e);
    if (e instanceof InterruptedException
        || t instanceof InterruptedException
        || t instanceof ClosedByInterruptException) {
      try {
        buildFuture.cancel(true);
      } catch (CancellationException ignored) {
        // Rethrow the original InterruptedException instead.
      }
      Thread.currentThread().interrupt();
    }
    throw e;
  }
  // Insertion order matters.
  LinkedHashMap<BuildRule, Optional<BuildResult>> resultBuilder = new LinkedHashMap<>();
  Preconditions.checkState(rulesToBuild.size() == results.size());
  for (int i = 0, len = rulesToBuild.size(); i < len; i++) {
    BuildRule rule = rulesToBuild.get(i);
    resultBuilder.put(rule, Optional.ofNullable(results.get(i)));
  }
  return BuildExecutionResult.builder()
      .setFailures(FluentIterable.from(results).filter(input -> input.getSuccess() == null))
      .setResults(resultBuilder)
      .build();
}
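The javadoc's null-result convention lines up with the difference between Futures.allAsList and Futures.successfulAsList. A short sketch of that contrast (not Buck code; Buck's build engine handles keep-going itself, but the future-level semantics are the same):

import com.google.common.util.concurrent.*;
import java.util.List;
import java.util.concurrent.ExecutionException;

public class KeepGoingSketch {
  public static void main(String[] args) throws Exception {
    ListenableFuture<String> ok = Futures.immediateFuture("built");
    ListenableFuture<String> failed =
        Futures.immediateFailedFuture(new RuntimeException("rule failed"));
    // allAsList: the combined future fails if any input fails (the isKeepGoing == false behavior).
    try {
      Futures.allAsList(List.of(ok, failed)).get();
    } catch (ExecutionException e) {
      System.out.println("fail-fast: " + e.getCause().getMessage());
    }
    // successfulAsList: failed inputs show up as nulls in iteration order,
    // matching the null-result convention described in the javadoc above.
    List<String> results = Futures.successfulAsList(List.of(ok, failed)).get();
    System.out.println(results); // [built, null]
  }
}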
use of com.google.common.util.concurrent.Futures in project metacat by Netflix.
the class ElasticSearchMetacatRefresh method _processPartitions.
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
final List<String> tables = elasticSearchUtil.getTableIdsByCatalogs(ElasticSearchDoc.Type.table.name(), qNames, excludeQualifiedNames);
final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream().map(s -> service.submit(() -> {
final QualifiedName tableName = QualifiedName.fromString(s, false);
final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
int offset = 0;
int count;
final Sort sort;
if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) {
sort = new Sort("id", SortOrder.ASC);
} else {
sort = new Sort("part_id", SortOrder.ASC);
}
final Pageable pageable = new Pageable(10000, offset);
do {
final List<PartitionDto> partitionDtos = partitionService.list(tableName, null, null, sort, pageable, true, true, true);
count = partitionDtos.size();
if (!partitionDtos.isEmpty()) {
final List<List<PartitionDto>> partitionedPartitionDtos = Lists.partition(partitionDtos, 1000);
partitionedPartitionDtos.forEach(subPartitionsDtos -> indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos)));
offset = offset + count;
pageable.setOffset(offset);
}
} while (count == 10000);
return Futures.transform(Futures.successfulAsList(indexFutures), Functions.constant((Void) null));
})).collect(Collectors.toList());
final ListenableFuture<Void> processPartitionsFuture = Futures.transformAsync(Futures.successfulAsList(futures), input -> {
final List<ListenableFuture<Void>> inputFuturesWithoutNulls = input.stream().filter(NOT_NULL).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls), Functions.constant(null));
});
return Futures.transformAsync(processPartitionsFuture, input -> {
elasticSearchUtil.refresh();
final List<ListenableFuture<Void>> cleanUpFutures = tables.stream().map(s -> service.submit(() -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames))).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(cleanUpFutures), Functions.constant(null));
});
}
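The successfulAsList-then-constant collapse used throughout this method can be written as a small helper. A minimal sketch with illustrative names; note it passes an explicit executor to Futures.transform, which newer Guava versions require, whereas the metacat code above relies on an older overload without one:

import com.google.common.base.Functions;
import com.google.common.util.concurrent.*;
import java.util.List;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;

public class CollapseToVoidSketch {
  // Collapse a batch of independent index futures into a single Void "done" signal,
  // tolerating individual failures the way successfulAsList does in _processPartitions.
  static ListenableFuture<Void> whenAllSettled(List<ListenableFuture<Void>> indexFutures) {
    return Futures.transform(
        Futures.successfulAsList(indexFutures), // failed entries become nulls, no propagation
        Functions.constant((Void) null),
        directExecutor());
  }

  public static void main(String[] args) throws Exception {
    List<ListenableFuture<Void>> futures = List.of(
        Futures.<Void>immediateFuture(null),
        Futures.<Void>immediateFailedFuture(new RuntimeException("one shard failed")));
    whenAllSettled(futures).get(); // completes normally despite the failure
    System.out.println("all settled");
  }
}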