Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hive by apache.
Class ZkRegistryBase, method ensureInstancesCache:
// Bogus warnings despite closeQuietly.
@SuppressWarnings("resource")
protected final synchronized PathChildrenCache ensureInstancesCache(long clusterReadyTimeoutMs) throws IOException {
  Preconditions.checkArgument(zooKeeperClient != null && zooKeeperClient.getState() == CuratorFrameworkState.STARTED, "client is not started");
  // lazily create PathChildrenCache
  PathChildrenCache instancesCache = this.instancesCache;
  if (instancesCache != null) {
    return instancesCache;
  }
  ExecutorService tp = Executors.newFixedThreadPool(1,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("StateChangeNotificationHandler").build());
  long startTimeNs = System.nanoTime(), deltaNs = clusterReadyTimeoutMs * 1000000L;
  long sleepTimeMs = Math.min(16, clusterReadyTimeoutMs);
  while (true) {
    instancesCache = new PathChildrenCache(zooKeeperClient, workersPath, true);
    instancesCache.getListenable().addListener(new InstanceStateChangeListener(), tp);
    try {
      instancesCache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
      this.instancesCache = instancesCache;
      return instancesCache;
    } catch (InvalidACLException e) {
      // PathChildrenCache tried to mkdir when the znode wasn't there, and failed.
      CloseableUtils.closeQuietly(instancesCache);
      long elapsedNs = System.nanoTime() - startTimeNs;
      if (deltaNs == 0 || deltaNs <= elapsedNs) {
        LOG.error("Unable to start curator PathChildrenCache", e);
        throw new IOException(e);
      }
      LOG.warn("The cluster is not started yet (InvalidACL); will retry");
      try {
        Thread.sleep(Math.min(sleepTimeMs, (deltaNs - elapsedNs) / 1000000L));
      } catch (InterruptedException e1) {
        LOG.error("Interrupted while retrying the PathChildrenCache startup");
        throw new IOException(e1);
      }
      sleepTimeMs = sleepTimeMs << 1;
    } catch (Exception e) {
      CloseableUtils.closeQuietly(instancesCache);
      LOG.error("Unable to start curator PathChildrenCache", e);
      throw new IOException(e);
    }
  }
}
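The essential ThreadFactoryBuilder usage above is a single daemon thread with a recognizable name for Curator listener callbacks. A minimal sketch of that configuration in isolation, with illustrative class and thread names (the uncaught-exception handler is an addition for the example, not part of the Hive code):

import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

public class ListenerExecutorSketch {

    public static ExecutorService newListenerExecutor() {
        // Daemon threads so the notification handler never blocks JVM shutdown;
        // the name format makes the thread easy to spot in thread dumps.
        ThreadFactory factory = new ThreadFactoryBuilder()
                .setDaemon(true)
                .setNameFormat("state-change-notification-%d")
                .setUncaughtExceptionHandler((t, e) ->
                        System.err.println("Uncaught in " + t.getName() + ": " + e))
                .build();
        return Executors.newSingleThreadExecutor(factory);
    }
}

Marking the thread as a daemon is a deliberate choice here: a listener-notification executor should never keep the JVM alive on its own.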
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project pravega by pravega.
Class MultiReadersEndToEndTest, method readAllEvents:
private Collection<Integer> readAllEvents(final int numParallelReaders, ClientFactory clientFactory,
                                          final String readerGroupName, final int numSegments) {
    ConcurrentLinkedQueue<Integer> read = new ConcurrentLinkedQueue<>();
    final ExecutorService executorService = Executors.newFixedThreadPool(numParallelReaders,
            new ThreadFactoryBuilder().setNameFormat("testreader-pool-%d").build());
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < numParallelReaders; i++) {
        futures.add(executorService.submit(() -> {
            final String readerId = UUID.randomUUID().toString();
            @Cleanup
            final EventStreamReader<Integer> reader = clientFactory.createReader(readerId, readerGroupName,
                    new IntegerSerializer(), ReaderConfig.builder().build());
            int emptyCount = 0;
            while (emptyCount <= numSegments) {
                try {
                    final Integer integerEventRead = reader.readNextEvent(100).getEvent();
                    if (integerEventRead != null) {
                        read.add(integerEventRead);
                        emptyCount = 0;
                    } else {
                        emptyCount++;
                    }
                } catch (ReinitializationRequiredException e) {
                    throw new RuntimeException(e);
                }
            }
        }));
    }
    // Wait until all readers are done.
    futures.forEach(f -> Futures.getAndHandleExceptions(f, RuntimeException::new));
    executorService.shutdownNow();
    return read;
}
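Futures.getAndHandleExceptions is a Pravega utility rather than a Guava or JDK method. With plain java.util.concurrent, the wait-for-all-readers step could be written roughly as follows (a sketch, not Pravega's implementation):

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

final class FutureWait {

    // Block on every reader task; unwrap ExecutionException so the test fails
    // with the underlying cause rather than the wrapper.
    static void awaitAll(List<Future<?>> futures) {
        for (Future<?> future : futures) {
            try {
                future.get();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } catch (ExecutionException e) {
                throw new RuntimeException(e.getCause());
            }
        }
    }
}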
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project opennms by OpenNMS.
Class StressCommand, method execute:
@Override
public Void execute() throws Exception {
    // Apply sane lower bounds to all of the configurable options
    intervalInSeconds = Math.max(1, intervalInSeconds);
    numberOfNodes = Math.max(1, numberOfNodes);
    numberOfInterfacesPerNode = Math.max(1, numberOfInterfacesPerNode);
    numberOfGroupsPerInterface = Math.max(1, numberOfGroupsPerInterface);
    numberOfNumericAttributesPerGroup = Math.max(0, numberOfNumericAttributesPerGroup);
    numberOfStringAttributesPerGroup = Math.max(0, numberOfStringAttributesPerGroup);
    reportIntervalInSeconds = Math.max(1, reportIntervalInSeconds);
    numberOfGeneratorThreads = Math.max(1, numberOfGeneratorThreads);
    stringVariationFactor = Math.max(0, stringVariationFactor);
    if (stringVariationFactor > 0) {
        stringAttributesVaried = metrics.meter("string-attributes-varied");
    }
    // Display the effective settings and rates
    double attributesPerSecond = (1 / (double) intervalInSeconds) * numberOfGroupsPerInterface * numberOfInterfacesPerNode * numberOfNodes;
    System.out.printf("Generating collection sets every %d seconds\n", intervalInSeconds);
    System.out.printf("\t for %d nodes\n", numberOfNodes);
    System.out.printf("\t with %d interfaces\n", numberOfInterfacesPerNode);
    System.out.printf("\t with %d attribute groups\n", numberOfGroupsPerInterface);
    System.out.printf("\t with %d numeric attributes\n", numberOfNumericAttributesPerGroup);
    System.out.printf("\t with %d string attributes\n", numberOfStringAttributesPerGroup);
    System.out.printf("Across %d threads\n", numberOfGeneratorThreads);
    if (stringVariationFactor > 0) {
        System.out.printf("With string variation factor %d\n", stringVariationFactor);
    }
    System.out.printf("Which will yield an effective\n");
    System.out.printf("\t %.2f numeric attributes per second\n", numberOfNumericAttributesPerGroup * attributesPerSecond);
    System.out.printf("\t %.2f string attributes per second\n", numberOfStringAttributesPerGroup * attributesPerSecond);
    ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build();
    // Setup the executor
    ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("Metrics Stress Tool Generator #%d").build();
    ExecutorService executor = Executors.newFixedThreadPool(numberOfGeneratorThreads, threadFactory);
    // Setup auxiliary objects needed by the persister
    ServiceParameters params = new ServiceParameters(Collections.emptyMap());
    RrdRepository repository = new RrdRepository();
    repository.setStep(Math.max(intervalInSeconds, 1));
    repository.setHeartBeat(repository.getStep() * 2);
    if (rras != null && rras.size() > 0) {
        repository.setRraList(rras);
    } else {
        // Use the default list of RRAs we provide in our stock configuration files
        repository.setRraList(Lists.newArrayList(
                "RRA:AVERAGE:0.5:1:2016", "RRA:AVERAGE:0.5:12:1488", "RRA:AVERAGE:0.5:288:366",
                "RRA:MAX:0.5:288:366", "RRA:MIN:0.5:288:366"));
    }
    repository.setRrdBaseDir(Paths.get(System.getProperty("opennms.home"), "share", "rrd", "snmp").toFile());
    // Calculate how fast we should insert the collection sets
    int sleepTimeInMillisBetweenNodes = 0;
    int sleepTimeInSecondsBetweenIterations = 0;
    System.out.printf("Sleeping for\n");
    if (burst) {
        sleepTimeInSecondsBetweenIterations = intervalInSeconds;
        System.out.printf("\t %d seconds between batches\n", sleepTimeInSecondsBetweenIterations);
    } else {
        // We want to "stream" the collection sets
        sleepTimeInMillisBetweenNodes = Math.round((((float) intervalInSeconds * 1000) / numberOfNodes) * numberOfGeneratorThreads);
        System.out.printf("\t %d milliseconds between nodes\n", sleepTimeInMillisBetweenNodes);
    }
    // Start generating, and keep generating until we're interrupted
    try {
        reporter.start(reportIntervalInSeconds, TimeUnit.SECONDS);
        while (true) {
            final Context context = batchTimer.time();
            try {
                // Split the tasks up among the threads
                List<Future<Void>> futures = new ArrayList<>();
                for (int generatorThreadId = 0; generatorThreadId < numberOfGeneratorThreads; generatorThreadId++) {
                    futures.add(executor.submit(generateAndPersistCollectionSets(params, repository, generatorThreadId, sleepTimeInMillisBetweenNodes)));
                }
                // Wait for all the tasks to complete before starting others
                for (Future<Void> future : futures) {
                    future.get();
                }
            } catch (InterruptedException | ExecutionException e) {
                break;
            } finally {
                context.stop();
            }
            try {
                Thread.sleep(sleepTimeInSecondsBetweenIterations * 1000L);
            } catch (InterruptedException e) {
                break;
            }
        }
    } finally {
        reporter.stop();
        abort.set(true);
        executor.shutdownNow();
    }
    return null;
}
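The generate/wait/sleep loop above submits one task per generator thread and blocks on every Future before sleeping. A stripped-down sketch of the same batch-per-iteration idea, using invokeAll to wait for a whole batch at once (the task body, thread count, and sleep interval are placeholders, not OpenNMS code):

import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BatchLoopSketch {

    public static void main(String[] args) throws InterruptedException {
        int threads = 4;
        ExecutorService executor = Executors.newFixedThreadPool(threads,
                new ThreadFactoryBuilder().setNameFormat("generator-%d").build());
        try {
            while (!Thread.currentThread().isInterrupted()) {
                List<Callable<Void>> batch = new ArrayList<>();
                for (int i = 0; i < threads; i++) {
                    final int id = i;
                    batch.add(() -> {
                        // Placeholder for the real per-thread generation work.
                        System.out.println("generator " + id + " ran");
                        return null;
                    });
                }
                // invokeAll blocks until every task in the batch has completed.
                executor.invokeAll(batch);
                TimeUnit.SECONDS.sleep(1);
            }
        } finally {
            executor.shutdownNow();
        }
    }
}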
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project opennms by OpenNMS.
Class BroadcastEventProcessor, method setupThreadPool:
private void setupThreadPool() throws IOException {
    // determine the size of the thread pool
    final int maxThreads = getNotifdConfigManager().getConfiguration().getMaxThreads();
    // enforce no limit when the value is <= 0
    final int effectiveMaxThreads = maxThreads > 0 ? maxThreads : Integer.MAX_VALUE;
    // make it easier to identify the notification task threads
    final ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("NotificationTask-%d").build();
    // use an unbounded queue to hold any additional tasks
    // this may not be ideal, but it's safer than the previous approach of immediately
    // creating a thread for every task
    final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
    // create the thread pool that enforces a ceiling on the number of concurrent threads
    m_notificationTaskExecutor = new ThreadPoolExecutor(effectiveMaxThreads, effectiveMaxThreads, 60L, TimeUnit.SECONDS, queue, threadFactory);
    m_notificationTaskExecutor.allowCoreThreadTimeOut(true);
}
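The interplay between the queue and the pool size is worth spelling out: with an unbounded LinkedBlockingQueue, a ThreadPoolExecutor never grows past its core size, so core and max are both set to the ceiling, and allowCoreThreadTimeOut(true) lets idle threads expire after the keep-alive instead of lingering forever. A generic sketch of that recipe (the helper class and method names are illustrative):

import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class BoundedPools {

    // core == max because an unbounded queue means the pool never adds threads
    // beyond corePoolSize; allowCoreThreadTimeOut lets idle threads exit.
    public static ThreadPoolExecutor newBoundedPool(String nameFormat, int maxThreads) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                maxThreads, maxThreads,
                60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setNameFormat(nameFormat).build());
        executor.allowCoreThreadTimeOut(true);
        return executor;
    }
}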
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project bazel by bazelbuild.
Class SkyframeActionExecutor, method constructActionGraphAndPathMap:
/**
 * Simultaneously construct an action graph for all the actions in Skyframe and a map from
 * {@link PathFragment}s to their respective {@link Artifact}s. We do this in a threadpool to save
 * around 1.5 seconds on a mid-sized build versus a single-threaded operation.
 */
private static Pair<ActionGraph, SortedMap<PathFragment, Artifact>> constructActionGraphAndPathMap(
    Iterable<ActionLookupValue> values,
    ConcurrentMap<ActionAnalysisMetadata, ConflictException> badActionMap) throws InterruptedException {
  MutableActionGraph actionGraph = new MapBasedActionGraph();
  ConcurrentNavigableMap<PathFragment, Artifact> artifactPathMap = new ConcurrentSkipListMap<>();
  // Action graph construction is CPU-bound.
  int numJobs = Runtime.getRuntime().availableProcessors();
  // No great reason for expecting 5000 action lookup values, but not worth counting size of
  // values.
  Sharder<ActionLookupValue> actionShards = new Sharder<>(numJobs, 5000);
  for (ActionLookupValue value : values) {
    actionShards.add(value);
  }
  ThrowableRecordingRunnableWrapper wrapper =
      new ThrowableRecordingRunnableWrapper("SkyframeActionExecutor#constructActionGraphAndPathMap");
  ExecutorService executor = Executors.newFixedThreadPool(numJobs,
      new ThreadFactoryBuilder().setNameFormat("ActionLookupValue Processor %d").build());
  for (List<ActionLookupValue> shard : actionShards) {
    executor.execute(wrapper.wrap(actionRegistration(shard, actionGraph, artifactPathMap, badActionMap)));
  }
  boolean interrupted = ExecutorUtil.interruptibleShutdown(executor);
  Throwables.propagateIfPossible(wrapper.getFirstThrownError());
  if (interrupted) {
    throw new InterruptedException();
  }
  return Pair.<ActionGraph, SortedMap<PathFragment, Artifact>>of(actionGraph, artifactPathMap);
}
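Sharder, ThrowableRecordingRunnableWrapper, and ExecutorUtil.interruptibleShutdown are Bazel-internal helpers. The general shape of the method, partitioning work across a named fixed pool and shutting down while honoring interruption, can be sketched with only the JDK and Guava (the item type, shard size, and per-item work are placeholders):

import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShardedWorkSketch {

    static void processInShards(List<String> items) throws InterruptedException {
        int numJobs = Runtime.getRuntime().availableProcessors();
        ExecutorService executor = Executors.newFixedThreadPool(numJobs,
                new ThreadFactoryBuilder().setNameFormat("shard-processor-%d").build());
        // Partition the input so each submitted task handles a contiguous chunk.
        for (List<String> shard : Lists.partition(items, Math.max(1, items.size() / numJobs))) {
            executor.execute(() -> shard.forEach(item -> {
                // Placeholder for the real per-item registration work.
                System.out.println("processed " + item);
            }));
        }
        executor.shutdown();
        try {
            // Wait for completion; on interruption, cancel outstanding work and rethrow.
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            executor.shutdownNow();
            throw e;
        }
    }
}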