Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.
In the class NMClientAsyncImpl, the method serviceStart:
@Override
protected void serviceStart() throws Exception {
  client.start();
  ThreadFactory tf = new ThreadFactoryBuilder()
      .setNameFormat(this.getClass().getName() + " #%d")
      .setDaemon(true).build();
  // Start with a default core-pool size and change it dynamically.
  int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
  threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1,
      TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
  eventDispatcherThread = new Thread() {
    @Override
    public void run() {
      ContainerEvent event = null;
      Set<String> allNodes = new HashSet<String>();
      while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
        try {
          event = events.take();
        } catch (InterruptedException e) {
          if (!stopped.get()) {
            LOG.error("Returning, thread interrupted", e);
          }
          return;
        }
        allNodes.add(event.getNodeId().toString());
        int threadPoolSize = threadPool.getCorePoolSize();
        // We can increase the pool size only if we haven't reached the
        // maximum limit yet.
        if (threadPoolSize != maxThreadPoolSize) {
          // The number of nodes where containers will run at *this* point
          // of time. This is *not* the cluster size and doesn't need to be.
          int nodeNum = allNodes.size();
          int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);
          if (threadPoolSize < idealThreadPoolSize) {
            // Bump up the pool size to idealThreadPoolSize +
            // INITIAL_THREAD_POOL_SIZE; the latter is just a buffer so we
            // are not always increasing the pool size.
            int newThreadPoolSize = Math.min(maxThreadPoolSize,
                idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
            LOG.info("Set NMClientAsync thread pool size to "
                + newThreadPoolSize
                + " as the number of nodes to talk to is " + nodeNum);
            threadPool.setCorePoolSize(newThreadPoolSize);
          }
        }
        // The events from the queue are handled in parallel by the thread
        // pool.
        threadPool.execute(getContainerEventProcessor(event));
        // TODO: Group launching of multiple containers to a single
        // NodeManager into a single connection
      }
    }
  };
  eventDispatcherThread.setName("Container Event Dispatcher");
  eventDispatcherThread.setDaemon(false);
  eventDispatcherThread.start();
  super.serviceStart();
}
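What makes this snippet interesting is the sizing trick: with an unbounded LinkedBlockingQueue, a ThreadPoolExecutor never grows past its core size on its own (tasks queue instead of spawning threads), so the dispatcher raises the core size itself as it discovers new NodeManagers. Below is a minimal, self-contained sketch of that pattern; the class name, constants, and node names are hypothetical, not Hadoop code.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class GrowingPoolSketch {
  private static final int INITIAL_SIZE = 2;
  private static final int MAX_SIZE = 500;

  public static void main(String[] args) throws InterruptedException {
    // Unbounded queue: the pool stays at its core size unless we grow it.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(INITIAL_SIZE,
        Integer.MAX_VALUE, 1, TimeUnit.HOURS,
        new LinkedBlockingQueue<Runnable>(),
        new ThreadFactoryBuilder()
            .setNameFormat("worker #%d").setDaemon(true).build());
    Set<String> seenNodes = new HashSet<>();
    for (String node : new String[] { "node1", "node2", "node3" }) {
      seenNodes.add(node);
      // Grow the core pool toward the number of distinct targets seen so
      // far (plus a small buffer), capped at the maximum.
      int ideal = Math.min(MAX_SIZE, seenNodes.size() + INITIAL_SIZE);
      if (pool.getCorePoolSize() < ideal) {
        pool.setCorePoolSize(ideal);
      }
      pool.execute(
          () -> System.out.println(Thread.currentThread().getName()));
    }
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}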
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.
In the class CleanerService, the method serviceInit:
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.conf = conf;
  // Create a scheduled executor service that runs the cleaner tasks.
  // Use 2 threads to accommodate the on-demand tasks and reduce the
  // chance of back-to-back runs.
  ThreadFactory tf =
      new ThreadFactoryBuilder().setNameFormat("Shared cache cleaner").build();
  scheduledExecutor = HadoopExecutors.newScheduledThreadPool(2, tf);
  super.serviceInit(conf);
}
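HadoopExecutors is a Hadoop-internal convenience wrapper around the JDK executor factories. Here is a self-contained sketch of the same idea using the stock JDK Executors; note that because the name format contains no %d, every thread in the pool gets the identical name (ThreadFactoryBuilder still applies String.format with the thread index, which a literal format simply ignores). The class name and task body are hypothetical.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class NamedSchedulerSketch {
  public static void main(String[] args) throws InterruptedException {
    // Without a "%d" in the format, both pool threads share one name.
    ThreadFactory tf =
        new ThreadFactoryBuilder().setNameFormat("cache-cleaner").build();
    ScheduledExecutorService scheduler =
        Executors.newScheduledThreadPool(2, tf);
    scheduler.scheduleAtFixedRate(
        () -> System.out.println(Thread.currentThread().getName()),
        0, 1, TimeUnit.SECONDS);
    Thread.sleep(2500);
    scheduler.shutdown();
  }
}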
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.
In the class InMemorySCMStore, the method serviceInit:
/**
* The in-memory store bootstraps itself from the shared cache entries that
* exist in HDFS.
*/
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.startTime = System.currentTimeMillis();
  this.initialDelayMin = getInitialDelay(conf);
  this.checkPeriodMin = getCheckPeriod(conf);
  this.stalenessMinutes = getStalenessPeriod(conf);
  bootstrap(conf);
  ThreadFactory tf =
      new ThreadFactoryBuilder().setNameFormat("InMemorySCMStore").build();
  scheduler = HadoopExecutors.newSingleThreadScheduledExecutor(tf);
  super.serviceInit(conf);
}
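A stripped-down sketch of the same single-thread scheduler, showing how a periodic staleness check might be wired up with the configured initial delay and period. The class name, task body, and delay values here are illustrative only, not the Hadoop implementation.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class StoreSchedulerSketch {
  public static void main(String[] args) {
    ThreadFactory tf =
        new ThreadFactoryBuilder().setNameFormat("InMemoryStore").build();
    ScheduledExecutorService scheduler =
        Executors.newSingleThreadScheduledExecutor(tf);
    long initialDelayMin = 10;
    long checkPeriodMin = 60;
    // Periodically evict entries not accessed within the staleness window.
    scheduler.scheduleAtFixedRate(
        () -> System.out.println("checking for stale entries"),
        initialDelayMin, checkPeriodMin, TimeUnit.MINUTES);
  }
}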
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.
In the class PerformanceEvaluation, the method doLocalClients:
/*
 * Run all clients in this VM, each in its own thread.
 */
static RunResult[] doLocalClients(final TestOptions opts, final Configuration conf)
    throws IOException, InterruptedException {
  final Class<? extends Test> cmd = determineCommandClass(opts.cmdName);
  assert cmd != null;
  @SuppressWarnings("unchecked")
  Future<RunResult>[] threads = new Future[opts.numClientThreads];
  RunResult[] results = new RunResult[opts.numClientThreads];
  ExecutorService pool = Executors.newFixedThreadPool(opts.numClientThreads,
      new ThreadFactoryBuilder().setNameFormat("TestClient-%s").build());
  final Connection con = ConnectionFactory.createConnection(conf);
  for (int i = 0; i < threads.length; i++) {
    final int index = i;
    threads[i] = pool.submit(new Callable<RunResult>() {
      @Override
      public RunResult call() throws Exception {
        TestOptions threadOpts = new TestOptions(opts);
        if (threadOpts.startRow == 0) {
          threadOpts.startRow = index * threadOpts.perClientRunRows;
        }
        RunResult run = runOneClient(cmd, conf, con, threadOpts, new Status() {
          @Override
          public void setStatus(final String msg) throws IOException {
            LOG.info(msg);
          }
        });
        LOG.info("Finished " + Thread.currentThread().getName() + " in "
            + run.duration + "ms over " + threadOpts.perClientRunRows + " rows");
        return run;
      }
    });
  }
  pool.shutdown();
  for (int i = 0; i < threads.length; i++) {
    try {
      results[i] = threads[i].get();
    } catch (ExecutionException e) {
      throw new IOException(e.getCause());
    }
  }
  final String test = cmd.getSimpleName();
  LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(results));
  Arrays.sort(results);
  long total = 0;
  for (RunResult result : results) {
    total += result.duration;
  }
  LOG.info("[" + test + "]"
      + "\tMin: " + results[0] + "ms"
      + "\tMax: " + results[results.length - 1] + "ms"
      + "\tAvg: " + (total / results.length) + "ms");
  con.close();
  return results;
}
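The harvest-the-futures pattern above in miniature. One detail worth noting: setNameFormat works with %s as well as %d, because the per-thread index is passed through String.format. Everything below (class name, fake workload) is hypothetical.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class ClientHarnessSketch {
  public static void main(String[] args) throws InterruptedException {
    int clients = 4;
    // "%s" is valid here: the thread index is formatted as a string.
    ExecutorService pool = Executors.newFixedThreadPool(clients,
        new ThreadFactoryBuilder().setNameFormat("TestClient-%s").build());
    List<Future<Long>> futures = new ArrayList<>();
    for (int i = 0; i < clients; i++) {
      final int index = i;
      futures.add(pool.submit(new Callable<Long>() {
        @Override
        public Long call() {
          // Stand-in for a real client run: return a fake duration.
          return (long) index;
        }
      }));
    }
    pool.shutdown();
    long total = 0;
    for (Future<Long> f : futures) {
      try {
        total += f.get();
      } catch (ExecutionException e) {
        // Unwrap the worker's failure, as doLocalClients does.
        throw new RuntimeException(e.getCause());
      }
    }
    System.out.println("total=" + total);
  }
}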
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.
In the class EntityGroupFSTimelineStore, the method serviceStart:
@Override
protected void serviceStart() throws Exception {
  super.serviceStart();
  LOG.info("Starting {}", getName());
  summaryStore.start();
  Configuration conf = getConfig();
  aclManager = new TimelineACLsManager(conf);
  aclManager.setTimelineStore(summaryStore);
  summaryTdm = new TimelineDataManager(summaryStore, aclManager);
  summaryTdm.init(conf);
  addService(summaryTdm);
  // Start child services that aren't already started.
  super.serviceStart();
  if (!fs.exists(activeRootPath)) {
    fs.mkdirs(activeRootPath);
    fs.setPermission(activeRootPath, ACTIVE_DIR_PERMISSION);
  }
  if (!fs.exists(doneRootPath)) {
    fs.mkdirs(doneRootPath);
    fs.setPermission(doneRootPath, DONE_DIR_PERMISSION);
  }
  objMapper = new ObjectMapper();
  objMapper.setAnnotationIntrospector(
      new JaxbAnnotationIntrospector(TypeFactory.defaultInstance()));
  jsonFactory = new MappingJsonFactory(objMapper);
  final long scanIntervalSecs = conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS_DEFAULT);
  final long cleanerIntervalSecs = conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CLEANER_INTERVAL_SECONDS,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CLEANER_INTERVAL_SECONDS_DEFAULT);
  final int numThreads = conf.getInt(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_THREADS,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_THREADS_DEFAULT);
  LOG.info("Scanning active directory {} every {} seconds", activeRootPath,
      scanIntervalSecs);
  LOG.info("Cleaning logs every {} seconds", cleanerIntervalSecs);
  executor = new ScheduledThreadPoolExecutor(numThreads,
      new ThreadFactoryBuilder().setNameFormat("EntityLogPluginWorker #%d").build());
  executor.scheduleAtFixedRate(new EntityLogScanner(), 0, scanIntervalSecs,
      TimeUnit.SECONDS);
  executor.scheduleAtFixedRate(new EntityLogCleaner(), cleanerIntervalSecs,
      cleanerIntervalSecs, TimeUnit.SECONDS);
}
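One caveat that applies to both periodic tasks above: if a scheduleAtFixedRate task throws, the executor silently suppresses all of its future runs. A common defensive wrapper, sketched here with hypothetical names and a deliberately failing second run to show the schedule surviving:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class GuardedScheduleSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(2,
        new ThreadFactoryBuilder().setNameFormat("LogWorker #%d").build());
    AtomicInteger runs = new AtomicInteger();
    executor.scheduleAtFixedRate(() -> {
      try {
        // Stand-in for a scanner task; fails on the second run.
        if (runs.incrementAndGet() == 2) {
          throw new IllegalStateException("scan failed");
        }
        System.out.println("scan " + runs.get() + " ok");
      } catch (RuntimeException e) {
        // Log and swallow, so one bad run does not cancel the schedule.
        System.err.println("scan failed, will retry: " + e);
      }
    }, 0, 500, TimeUnit.MILLISECONDS);
    Thread.sleep(2000);
    executor.shutdownNow();
  }
}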