Use of java.util.concurrent.ForkJoinPool in project hadoop by apache.
The class FSDirectory, method updateCountForQuota.
/**
 * Update the count of each directory with quota in the namespace.
 * A directory's count is defined as the total number of inodes in the tree
 * rooted at the directory.
 *
 * This is an update of existing state of the filesystem and does not
 * throw QuotaExceededException.
 */
void updateCountForQuota(int initThreads) {
  writeLock();
  try {
    int threads = (initThreads < 1) ? 1 : initThreads;
    LOG.info("Initializing quota with " + threads + " thread(s)");
    long start = Time.now();
    QuotaCounts counts = new QuotaCounts.Builder().build();
    ForkJoinPool p = new ForkJoinPool(threads);
    RecursiveAction task = new InitQuotaTask(getBlockStoragePolicySuite(),
        rootDir.getStoragePolicyID(), rootDir, counts);
    p.execute(task);
    task.join();
    p.shutdown();
    LOG.info("Quota initialization completed in " + (Time.now() - start)
        + " milliseconds\n" + counts);
  } finally {
    writeUnlock();
  }
}
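The Hadoop snippet shows the execute-then-join pattern: create a pool with an explicit parallelism, hand it the root RecursiveAction asynchronously via execute(), block on join(), and shut the pool down afterwards. The following is a minimal, self-contained sketch of that same pattern, assuming nothing from Hadoop; SumTask, the array contents, and the 1024-element threshold are invented for illustration.

import java.util.Arrays;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
import java.util.concurrent.atomic.AtomicLong;

public class ExecuteJoinSketch {
  // Hypothetical task: sums a range of the array, splitting it in half
  // until the range is small enough to process directly.
  static class SumTask extends RecursiveAction {
    private final long[] data;
    private final int from;
    private final int to;
    private final AtomicLong total;

    SumTask(long[] data, int from, int to, AtomicLong total) {
      this.data = data;
      this.from = from;
      this.to = to;
      this.total = total;
    }

    @Override
    protected void compute() {
      if (to - from <= 1024) {
        long local = 0;
        for (int i = from; i < to; i++) {
          local += data[i];
        }
        total.addAndGet(local);
      } else {
        int mid = (from + to) >>> 1;
        // Fork both halves and wait for them to finish.
        invokeAll(new SumTask(data, from, mid, total),
                  new SumTask(data, mid, to, total));
      }
    }
  }

  public static void main(String[] args) {
    long[] data = new long[1_000_000];
    Arrays.fill(data, 1L);
    AtomicLong total = new AtomicLong();
    ForkJoinPool pool = new ForkJoinPool(4); // explicitly sized, like new ForkJoinPool(threads) above
    SumTask root = new SumTask(data, 0, data.length, total);
    pool.execute(root); // asynchronous submission...
    root.join();        // ...then block on the root task
    pool.shutdown();
    System.out.println("sum = " + total.get());
  }
}

Sizing the pool explicitly, rather than relying on the common pool, is what lets updateCountForQuota honor its initThreads argument.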
Use of java.util.concurrent.ForkJoinPool in project hbase by apache.
The class MultiThreadedClientExample, method run.
@Override
public int run(String[] args) throws Exception {
  if (args.length < 1 || args.length > 2) {
    System.out.println("Usage: " + this.getClass().getName() + " tableName [num_operations]");
    return -1;
  }
  final TableName tableName = TableName.valueOf(args[0]);
  int numOperations = DEFAULT_NUM_OPERATIONS;
  // The second arg is the number of operations to send.
  if (args.length == 2) {
    numOperations = Integer.parseInt(args[1]);
  }
  // Threads for the client only.
  //
  // We don't want to mix hbase and business logic.
  //
  // Note: 'threads' and 'internalPool' are instance fields of
  // MultiThreadedClientExample (not shown here); internalPool is a separate
  // executor for the application-side callables submitted below.
  ExecutorService service = new ForkJoinPool(threads * 2);
  // Create two different connections showing how it's possible to
  // separate different types of requests onto different connections.
  final Connection writeConnection = ConnectionFactory.createConnection(getConf(), service);
  final Connection readConnection = ConnectionFactory.createConnection(getConf(), service);
  // At this point the entire cache for the region locations is full.
  // Only do this if the number of regions in a table is easy to fit into memory.
  //
  // If you are interacting with more than 25k regions on a client then it's probably not good
  // to do this at all.
  warmUpConnectionCache(readConnection, tableName);
  warmUpConnectionCache(writeConnection, tableName);
  List<Future<Boolean>> futures = new ArrayList<>(numOperations);
  for (int i = 0; i < numOperations; i++) {
    double r = ThreadLocalRandom.current().nextDouble();
    Future<Boolean> f;
    // These callables are meant to represent real work done by your application.
    if (r < .30) {
      f = internalPool.submit(new WriteExampleCallable(writeConnection, tableName));
    } else if (r < .50) {
      f = internalPool.submit(new SingleWriteExampleCallable(writeConnection, tableName));
    } else {
      f = internalPool.submit(new ReadExampleCallable(writeConnection, tableName));
    }
    futures.add(f);
  }
  // Wait a long time for all the reads/writes to complete.
  for (Future<Boolean> f : futures) {
    f.get(10, TimeUnit.MINUTES);
  }
  // Clean up after ourselves for cleanliness.
  internalPool.shutdownNow();
  service.shutdownNow();
  return 0;
}
Use of java.util.concurrent.ForkJoinPool in project buck by facebook.
The class IJProjectCleaner, method clean.
@SuppressWarnings("serial")
public void clean(final BuckConfig buckConfig, final Path librariesXmlBase,
    final boolean runPostGenerationCleaner, final boolean removeOldLibraries) {
  if (!runPostGenerationCleaner && !removeOldLibraries) {
    return;
  }
  final Set<File> buckDirectories = new HashSet<>();
  buckDirectories.add(convertPathToFile(
      projectFilesystem.resolve(projectFilesystem.getBuckPaths().getBuckOut())));
  ArtifactCacheBuckConfig cacheBuckConfig = new ArtifactCacheBuckConfig(buckConfig);
  for (DirCacheEntry entry : cacheBuckConfig.getDirCacheEntries()) {
    buckDirectories.add(convertPathToFile(entry.getCacheDir()));
  }
  ForkJoinPool cleanExecutor = new ForkJoinPool(getParallelismLimit());
  try {
    cleanExecutor.invoke(new RecursiveAction() {
      @Override
      protected void compute() {
        List<RecursiveAction> topLevelTasks = new ArrayList<>(2);
        if (runPostGenerationCleaner) {
          topLevelTasks.add(new CandidateFinderWithExclusions(
              convertPathToFile(projectFilesystem.resolve("")),
              IML_FILENAME_FILTER,
              buckDirectories));
        }
        topLevelTasks.add(new CandidateFinder(convertPathToFile(librariesXmlBase), XML_FILENAME_FILTER));
        invokeAll(topLevelTasks);
      }
    });
  } finally {
    cleanExecutor.shutdown();
    try {
      cleanExecutor.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIME_UNIT);
    } catch (InterruptedException e) {
      Logger.get(IJProjectCleaner.class).warn("Timeout during executor shutdown.", e);
    }
  }
}
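The Buck cleaner wraps its work in a single anonymous RecursiveAction run with invoke(): compute() collects the real tasks and runs them with invokeAll(), and the finally block tears the pool down with shutdown() plus a bounded awaitTermination(). Below is a stripped-down sketch of that shape; the println bodies and the 30-second timeout are placeholders for the CandidateFinder tasks and Buck's shutdown constants.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
import java.util.concurrent.TimeUnit;

public class InvokeAllSketch {
  public static void main(String[] args) {
    ForkJoinPool cleanExecutor = new ForkJoinPool(2); // parallelism chosen for the sketch
    try {
      // invoke() blocks until the root action, and everything it invokeAll()s, completes.
      cleanExecutor.invoke(new RecursiveAction() {
        @Override
        protected void compute() {
          List<RecursiveAction> topLevelTasks = new ArrayList<>(2);
          topLevelTasks.add(new RecursiveAction() {
            @Override
            protected void compute() {
              System.out.println("scan project files"); // stands in for CandidateFinderWithExclusions
            }
          });
          topLevelTasks.add(new RecursiveAction() {
            @Override
            protected void compute() {
              System.out.println("scan library XML"); // stands in for CandidateFinder
            }
          });
          invokeAll(topLevelTasks);
        }
      });
    } finally {
      cleanExecutor.shutdown();
      try {
        cleanExecutor.awaitTermination(30, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }
}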
Use of java.util.concurrent.ForkJoinPool in project buck by facebook.
The class PythonLibraryDescriptionTest, method versionedSrcs.
@Test
public void versionedSrcs() throws Exception {
  BuildTarget target = BuildTargetFactory.newInstance("//foo:lib");
  SourcePath matchedSource = new FakeSourcePath("foo/a.py");
  SourcePath unmatchedSource = new FakeSourcePath("foo/b.py");
  GenruleBuilder transitiveDepBuilder =
      GenruleBuilder.newGenruleBuilder(BuildTargetFactory.newInstance("//:tdep")).setOut("out");
  VersionedAliasBuilder depBuilder = new VersionedAliasBuilder(BuildTargetFactory.newInstance("//:dep"))
      .setVersions(ImmutableMap.of(
          Version.of("1.0"), transitiveDepBuilder.getTarget(),
          Version.of("2.0"), transitiveDepBuilder.getTarget()));
  AbstractNodeBuilder<?, ?, ?> builder = new PythonLibraryBuilder(target)
      .setVersionedSrcs(VersionMatchedCollection.<SourceList>builder()
          .add(ImmutableMap.of(depBuilder.getTarget(), Version.of("1.0")),
              SourceList.ofUnnamedSources(ImmutableSortedSet.of(matchedSource)))
          .add(ImmutableMap.of(depBuilder.getTarget(), Version.of("2.0")),
              SourceList.ofUnnamedSources(ImmutableSortedSet.of(unmatchedSource)))
          .build());
  TargetGraph targetGraph = VersionedTargetGraphBuilder.transform(
      new FixedVersionSelector(ImmutableMap.of(
          builder.getTarget(), ImmutableMap.of(depBuilder.getTarget(), Version.of("1.0")))),
      TargetGraphAndBuildTargets.of(
          TargetGraphFactory.newInstance(transitiveDepBuilder.build(), depBuilder.build(), builder.build()),
          ImmutableSet.of(builder.getTarget())),
      new ForkJoinPool()).getTargetGraph();
  BuildRuleResolver resolver = new BuildRuleResolver(targetGraph, new DefaultTargetNodeToBuildRuleTransformer());
  PythonLibrary library = (PythonLibrary) resolver.requireRule(builder.getTarget());
  assertThat(
      library.getPythonPackageComponents(PythonTestUtils.PYTHON_PLATFORM, CxxPlatformUtils.DEFAULT_PLATFORM)
          .getModules().values(),
      Matchers.contains(matchedSource));
}
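In this test the no-argument constructor is sufficient: VersionedTargetGraphBuilder.transform just needs some pool to parallelize version resolution, and new ForkJoinPool() sizes itself to Runtime.getRuntime().availableProcessors(). A small sketch of that default-sized pool follows; the Fib task is invented for illustration and is unrelated to Buck.

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveTask;

public class DefaultPoolSketch {
  // Toy RecursiveTask used only to give the pool something to run.
  static class Fib extends RecursiveTask<Long> {
    private final int n;

    Fib(int n) {
      this.n = n;
    }

    @Override
    protected Long compute() {
      if (n <= 1) {
        return (long) n;
      }
      Fib left = new Fib(n - 1);
      left.fork(); // run one branch asynchronously
      return new Fib(n - 2).compute() + left.join();
    }
  }

  public static void main(String[] args) {
    // The no-arg constructor uses the number of available processors as the parallelism.
    ForkJoinPool pool = new ForkJoinPool();
    System.out.println(pool.invoke(new Fib(20)));
    pool.shutdown();
  }
}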
Use of java.util.concurrent.ForkJoinPool in project hibernate-orm by hibernate.
The class CorrectnessTestCase, method test.
@Test
public void test() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  Map<Integer, List<Log<String>>> allFamilyNames = new HashMap<>();
  Map<Integer, List<Log<Set<String>>>> allFamilyMembers = new HashMap<>();
  running = true;
  List<Future<Void>> futures = new ArrayList<>();
  for (int node = 0; node < NUM_NODES; ++node) {
    final int NODE = node;
    for (int i = 0; i < NUM_THREADS_PER_NODE; ++i) {
      final int I = i;
      futures.add(exec.submit(() -> {
        Thread.currentThread().setName("Node" + (char) ('A' + NODE) + "-thread-" + I);
        threadNode.set(NODE);
        while (running) {
          Operation operation;
          if (familyIds.size() < NUM_FAMILIES) {
            operation = new InsertFamily(ThreadLocalRandom.current().nextInt(5) == 0);
          } else {
            operation = getOperation();
          }
          try {
            operation.run();
          } catch (Exception e) {
            // Ignore exceptions from optimistic failures and induced exceptions.
            if (hasCause(e, InducedException.class)) {
              continue;
            } else if (Stream.of(EXPECTED).anyMatch(exceptions -> matches(e, exceptions))) {
              continue;
            }
            exceptions.add(e);
            log.error("Failed " + operation.getClass().getName(), e);
          }
        }
        synchronized (allFamilyNames) {
          for (Map.Entry<Integer, List<Log<String>>> entry : familyNames.get().entrySet()) {
            List<Log<String>> list = allFamilyNames.get(entry.getKey());
            if (list == null) {
              allFamilyNames.put(entry.getKey(), list = new ArrayList<>());
            }
            list.addAll(entry.getValue());
          }
          for (Map.Entry<Integer, List<Log<Set<String>>>> entry : familyMembers.get().entrySet()) {
            List<Log<Set<String>>> list = allFamilyMembers.get(entry.getKey());
            if (list == null) {
              allFamilyMembers.put(entry.getKey(), list = new ArrayList<>());
            }
            list.addAll(entry.getValue());
          }
        }
        return null;
      }));
    }
  }
  Exception failure = exceptions.poll(EXECUTION_TIME, TimeUnit.MILLISECONDS);
  if (failure != null) {
    exceptions.addFirst(failure);
  }
  running = false;
  exec.shutdown();
  if (!exec.awaitTermination(1000, TimeUnit.SECONDS)) {
    throw new IllegalStateException();
  }
  for (Future<Void> f : futures) {
    // Check for exceptions thrown on the worker threads.
    f.get();
  }
  checkForEmptyPendingPuts();
  log.infof("Generated %d timestamps%n", timestampGenerator.get());
  AtomicInteger created = new AtomicInteger();
  AtomicInteger removed = new AtomicInteger();
  ForkJoinPool threadPool = ForkJoinPool.commonPool();
  ArrayList<ForkJoinTask<?>> tasks = new ArrayList<>();
  for (Map.Entry<Integer, List<Log<String>>> entry : allFamilyNames.entrySet()) {
    tasks.add(threadPool.submit(() -> {
      int familyId = entry.getKey();
      List<Log<String>> list = entry.getValue();
      created.incrementAndGet();
      NavigableMap<Integer, List<Log<String>>> logByTime = getWritesAtTime(list);
      checkCorrectness("family_name-" + familyId + "-", list, logByTime);
      if (list.stream().anyMatch(l -> l.type == LogType.WRITE && l.getValue() == null)) {
        removed.incrementAndGet();
      }
    }));
  }
  for (Map.Entry<Integer, List<Log<Set<String>>>> entry : allFamilyMembers.entrySet()) {
    tasks.add(threadPool.submit(() -> {
      int familyId = entry.getKey();
      List<Log<Set<String>>> list = entry.getValue();
      NavigableMap<Integer, List<Log<Set<String>>>> logByTime = getWritesAtTime(list);
      checkCorrectness("family_members-" + familyId + "-", list, logByTime);
    }));
  }
  for (ForkJoinTask<?> task : tasks) {
    // With heavy logging this may have trouble completing in time.
    task.get(30, TimeUnit.SECONDS);
  }
  if (!exceptions.isEmpty()) {
    for (Exception e : exceptions) {
      log.error("Test failure", e);
    }
    throw new IllegalStateException("There were " + exceptions.size() + " exceptions");
  }
  log.infof("Created %d families, removed %d%n", created.get(), removed.get());
}