Use of java.util.concurrent.ThreadFactory in project hive by apache.
In the class HiveMetaStoreChecker, method checkPartitionDirs:
/**
 * Assume that depth is 2, i.e., partition columns are a and b
 * tblPath/a=1              => throw exception
 * tblPath/a=1/file         => throw exception
 * tblPath/a=1/b=2/file     => return a=1/b=2
 * tblPath/a=1/b=2/c=3      => return a=1/b=2
 * tblPath/a=1/b=2/c=3/file => return a=1/b=2
 *
 * @param basePath
 *          Start directory
 * @param allDirs
 *          This set will contain the leaf paths at the end.
 * @param maxDepth
 *          Specify how deep the search goes.
 * @throws IOException
 *           Thrown if we can't get lists from the fs.
 * @throws HiveException
 *           Thrown if the directory layout does not match the partition depth.
 */
private void checkPartitionDirs(Path basePath, Set<Path> allDirs, int maxDepth)
    throws IOException, HiveException {
  // Here we just reuse the THREAD_COUNT configuration for
  // METASTORE_FS_HANDLER_THREADS_COUNT since this results in better performance.
  // The missing partitions discovered here are later added by the metastore using a
  // thread pool of size METASTORE_FS_HANDLER_THREADS_COUNT. If we used a different
  // pool size here, the smaller of the two pools would become a bottleneck.
  int poolSize = conf.getInt(ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT.varname, 15);
  ExecutorService executor;
  if (poolSize <= 1) {
    LOG.debug("Using single-threaded version of MSCK-GetPaths");
    executor = MoreExecutors.sameThreadExecutor();
  } else {
    LOG.debug("Using multi-threaded version of MSCK-GetPaths with number of threads " + poolSize);
    ThreadFactory threadFactory =
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MSCK-GetPaths-%d").build();
    executor = Executors.newFixedThreadPool(poolSize, threadFactory);
  }
  checkPartitionDirs(executor, basePath, allDirs, basePath.getFileSystem(conf), maxDepth);
  executor.shutdown();
}
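The branch above amounts to a reusable pattern: run on the calling thread when the pool size is 1 or less, otherwise hand work to a fixed pool of named daemon threads. Below is a minimal standalone sketch of that pattern; the helper name is my own, and it uses MoreExecutors.newDirectExecutorService(), the newer Guava replacement for the deprecated sameThreadExecutor() seen above:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public final class ExecutorSizing {

  // Hypothetical helper: a direct (caller-thread) executor for pool sizes <= 1,
  // otherwise a fixed pool whose daemon threads carry a readable name.
  static ExecutorService forPoolSize(int poolSize, String nameFormat) {
    if (poolSize <= 1) {
      // Runs each task on the submitting thread; no extra threads are created.
      return MoreExecutors.newDirectExecutorService();
    }
    ThreadFactory threadFactory = new ThreadFactoryBuilder()
        .setDaemon(true)              // do not keep the JVM alive for these threads
        .setNameFormat(nameFormat)    // e.g. "MSCK-GetPaths-%d"
        .build();
    return Executors.newFixedThreadPool(poolSize, threadFactory);
  }

  public static void main(String[] args) {
    ExecutorService executor = forPoolSize(4, "MSCK-GetPaths-%d");
    executor.submit(() -> System.out.println(Thread.currentThread().getName()));
    executor.shutdown();
  }
}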
Use of java.util.concurrent.ThreadFactory in project hive by apache.
In the class HouseKeeperServiceBase, method start:
@Override
public void start(HiveConf hiveConf) throws Exception {
  this.hiveConf = hiveConf;
  HiveTxnManager mgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(hiveConf);
  if (!mgr.supportsAcid()) {
    LOG.info(this.getClass().getName() + " not started since " +
        mgr.getClass().getName() + " does not support Acid.");
    // there are no transactions in this case
    return;
  }
  pool = Executors.newScheduledThreadPool(1, new ThreadFactory() {
    private final AtomicInteger threadCounter = new AtomicInteger();

    @Override
    public Thread newThread(Runnable r) {
      return new Thread(r, HouseKeeperServiceBase.this.getClass().getName() +
          "-" + threadCounter.getAndIncrement());
    }
  });
  TimeUnit tu = TimeUnit.MILLISECONDS;
  pool.scheduleAtFixedRate(getScheduedAction(hiveConf, isAliveCounter), getStartDelayMs(),
      getIntervalMs(), tu);
  LOG.info("Started " + this.getClass().getName() + " with delay/interval = " +
      getStartDelayMs() + "/" + getIntervalMs() + " " + tu);
}
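The anonymous ThreadFactory here exists only to give each worker thread a recognizable name, using an AtomicInteger so concurrent calls still produce unique suffixes. A self-contained sketch of the same pattern follows; the class name and the periodic task are illustrative, not taken from Hive:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public final class NamedSchedulerDemo {

  public static void main(String[] args) throws InterruptedException {
    // Each thread gets a unique, stable name: NamedSchedulerDemo-0, -1, ...
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1, new ThreadFactory() {
      private final AtomicInteger threadCounter = new AtomicInteger();

      @Override
      public Thread newThread(Runnable r) {
        return new Thread(r, "NamedSchedulerDemo-" + threadCounter.getAndIncrement());
      }
    });

    // Illustrative periodic task; in HouseKeeperServiceBase this is the
    // housekeeping action returned by getScheduedAction(...).
    pool.scheduleAtFixedRate(
        () -> System.out.println("tick on " + Thread.currentThread().getName()),
        0, 100, TimeUnit.MILLISECONDS);

    TimeUnit.MILLISECONDS.sleep(350);
    pool.shutdown();
  }
}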
Use of java.util.concurrent.ThreadFactory in project hive by apache.
In the class ATSHook, method setupAtsExecutor:
private static void setupAtsExecutor(HiveConf conf) {
  synchronized (LOCK) {
    if (executor == null) {
      // The call to ATS appears to block indefinitely, blocking the ATS thread while
      // the hook continues to submit work to the ExecutorService with each query.
      // Over time the queued items can cause OOM as the HookContext seems to contain
      // some items which use a lot of memory.
      // Prevent this situation by creating the executor with bounded capacity -
      // the event will not be sent to ATS if there are too many outstanding work submissions.
      int queueCapacity = conf.getIntVar(HiveConf.ConfVars.ATSHOOKQUEUECAPACITY);

      // Executor to create the ATS events.
      // This can use significant resources and should not be done on the main query thread.
      LOG.info("Creating ATS executor queue with capacity " + queueCapacity);
      BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(queueCapacity);
      ThreadFactory threadFactory =
          new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ATS Logger %d").build();
      executor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS, queue, threadFactory);

      // Create a separate thread to send the events.
      // Keep it separate from event creation in case the send blocks.
      BlockingQueue<Runnable> senderQueue = new LinkedBlockingQueue<Runnable>(queueCapacity);
      senderExecutor =
          new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS, senderQueue, threadFactory);

      YarnConfiguration yarnConf = new YarnConfiguration();
      timelineClient = TimelineClient.createTimelineClient();
      timelineClient.init(yarnConf);
      timelineClient.start();

      ShutdownHookManager.addShutdownHook(new Runnable() {
        @Override
        public void run() {
          try {
            executor.shutdown();
            executor.awaitTermination(WAIT_TIME, TimeUnit.SECONDS);
            executor = null;
          } catch (InterruptedException ie) {
            /* ignore */
          }
          timelineClient.stop();
        }
      });
    }
  }
}
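The bounded LinkedBlockingQueue is what enforces the "drop events under load" behavior: once queueCapacity tasks are waiting, ThreadPoolExecutor's default AbortPolicy makes further submissions throw RejectedExecutionException, which the hook can treat as a signal to skip the event. A standalone sketch of that mechanism (all names are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class BoundedQueueDemo {

  public static void main(String[] args) {
    // Single worker, at most 2 queued tasks; further submissions are rejected.
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
        1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(2));

    for (int i = 0; i < 10; i++) {
      final int id = i;
      try {
        executor.execute(() -> {
          try {
            Thread.sleep(100);  // simulate a slow (possibly blocking) ATS send
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
          System.out.println("sent event " + id);
        });
      } catch (RejectedExecutionException e) {
        // Queue is full: drop the event instead of letting submissions pile up.
        System.out.println("dropped event " + id);
      }
    }
    executor.shutdown();
  }
}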
Use of java.util.concurrent.ThreadFactory in project hive by apache.
In the class DbTxnManager, method initHeartbeatExecutorService:
private synchronized void initHeartbeatExecutorService() {
  if (heartbeatExecutorService != null
      && !heartbeatExecutorService.isShutdown()
      && !heartbeatExecutorService.isTerminated()) {
    return;
  }
  heartbeatExecutorService = Executors.newScheduledThreadPool(
      conf.getIntVar(HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE),
      new ThreadFactory() {
        private final AtomicInteger threadCounter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
          return new HeartbeaterThread(r, "Heartbeater-" + threadCounter.getAndIncrement());
        }
      });
  ((ScheduledThreadPoolExecutor) heartbeatExecutorService).setRemoveOnCancelPolicy(true);
}
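setRemoveOnCancelPolicy(true) makes the scheduler purge a task from its work queue as soon as it is cancelled, rather than holding it until its next scheduled run time; for long heartbeat intervals and many short-lived transactions this presumably keeps the queue from filling with dead tasks. A small demonstration of the policy (the names are illustrative, not from Hive):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class RemoveOnCancelDemo {

  public static void main(String[] args) {
    ScheduledThreadPoolExecutor pool =
        (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1);
    // Without this, a cancelled task stays queued until its delay elapses.
    pool.setRemoveOnCancelPolicy(true);

    ScheduledFuture<?> heartbeat = pool.scheduleAtFixedRate(
        () -> System.out.println("heartbeat"), 1, 1, TimeUnit.HOURS);

    heartbeat.cancel(false);
    // With remove-on-cancel, the task is gone from the queue immediately.
    System.out.println("queued tasks after cancel: " + pool.getQueue().size());  // prints 0
    pool.shutdown();
  }
}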
Use of java.util.concurrent.ThreadFactory in project jmxtrans by jmxtrans.
In the class JmxTransModule, method createExecutorService:
private ThreadPoolExecutor createExecutorService(int poolSize, int workQueueCapacity, String componentName) {
  BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(workQueueCapacity);
  ThreadFactory threadFactory = threadFactory(componentName);
  return new ThreadPoolExecutor(poolSize, poolSize, 0L, MILLISECONDS, workQueue, threadFactory);
}
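The threadFactory(componentName) helper is not included in this excerpt. A plausible reconstruction using Guava's ThreadFactoryBuilder, in the same style as the Hive examples above (the daemon flag and the name format are assumptions, not necessarily what jmxtrans actually does):

import java.util.concurrent.ThreadFactory;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

class ComponentThreadFactories {
  // Hypothetical reconstruction of the helper referenced above; the real
  // jmxtrans implementation may differ.
  static ThreadFactory threadFactory(String componentName) {
    return new ThreadFactoryBuilder()
        .setDaemon(true)                        // assumption: workers should not block JVM exit
        .setNameFormat(componentName + "-%d")   // e.g. "queryProcessorExecutor-0"
        .build();
  }
}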