Example use of java.util.concurrent.RecursiveAction in the Apache Hadoop project,
taken from the method updateCountForQuota of the class FSDirectory:
/**
 * Update the count of each directory with quota in the namespace.
 * A directory's count is defined as the total number inodes in the tree
 * rooted at the directory.
 *
 * This is an update of existing state of the filesystem and does not
 * throw QuotaExceededException.
 *
 * @param initThreads requested number of initialization threads; values
 *                    below 1 are clamped to 1
 */
void updateCountForQuota(int initThreads) {
  writeLock();
  try {
    int threads = Math.max(1, initThreads);
    LOG.info("Initializing quota with " + threads + " thread(s)");
    long start = Time.now();
    QuotaCounts counts = new QuotaCounts.Builder().build();
    ForkJoinPool p = new ForkJoinPool(threads);
    try {
      RecursiveAction task = new InitQuotaTask(getBlockStoragePolicySuite(),
          rootDir.getStoragePolicyID(), rootDir, counts);
      p.execute(task);
      // Block until the whole quota tree has been traversed.
      task.join();
    } finally {
      // Shut the pool down even if the init task throws, so its worker
      // threads are not leaked.
      p.shutdown();
    }
    LOG.info("Quota initialization completed in " + (Time.now() - start)
        + " milliseconds\n" + counts);
  } finally {
    writeUnlock();
  }
}
Example use of java.util.concurrent.RecursiveAction in the Buck project by Facebook,
taken from the method clean of the class IJProjectCleaner:
/**
 * Removes stale IntelliJ project artifacts left over from previous generations.
 *
 * Scans the project tree for candidate .iml files (when the post-generation
 * cleaner is enabled) and the libraries XML directory for candidate .xml files,
 * excluding Buck's own output and cache directories, using a ForkJoinPool for
 * the recursive directory walk.
 *
 * @param buckConfig               source of the dir-cache locations to exclude
 * @param librariesXmlBase         directory holding generated library XML files
 * @param runPostGenerationCleaner whether to scan the whole project for stale
 *                                 .iml files
 * @param removeOldLibraries       whether old library XML files should be removed
 */
@SuppressWarnings("serial")
public void clean(final BuckConfig buckConfig, final Path librariesXmlBase, final boolean runPostGenerationCleaner, final boolean removeOldLibraries) {
  if (!runPostGenerationCleaner && !removeOldLibraries) {
    return;
  }
  // Directories owned by Buck itself must never be scanned for candidates.
  final Set<File> buckDirectories = new HashSet<>();
  buckDirectories.add(convertPathToFile(projectFilesystem.resolve(projectFilesystem.getBuckPaths().getBuckOut())));
  ArtifactCacheBuckConfig cacheBuckConfig = new ArtifactCacheBuckConfig(buckConfig);
  for (DirCacheEntry entry : cacheBuckConfig.getDirCacheEntries()) {
    buckDirectories.add(convertPathToFile(entry.getCacheDir()));
  }
  ForkJoinPool cleanExecutor = new ForkJoinPool(getParallelismLimit());
  try {
    cleanExecutor.invoke(new RecursiveAction() {
      @Override
      protected void compute() {
        List<RecursiveAction> topLevelTasks = new ArrayList<>(2);
        if (runPostGenerationCleaner) {
          topLevelTasks.add(new CandidateFinderWithExclusions(convertPathToFile(projectFilesystem.resolve("")), IML_FILENAME_FILTER, buckDirectories));
        }
        topLevelTasks.add(new CandidateFinder(convertPathToFile(librariesXmlBase), XML_FILENAME_FILTER));
        invokeAll(topLevelTasks);
      }
    });
  } finally {
    cleanExecutor.shutdown();
    try {
      // awaitTermination returning false is the timeout case; an
      // InterruptedException means this thread was interrupted while waiting.
      if (!cleanExecutor.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIME_UNIT)) {
        Logger.get(IJProjectCleaner.class).warn("Timeout during executor shutdown.");
      }
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers up the stack can observe it.
      Thread.currentThread().interrupt();
      Logger.get(IJProjectCleaner.class).warn("Interrupted during executor shutdown.", e);
    }
  }
}
Aggregations