Use of java.util.concurrent.ThreadFactory in project hbase by apache.
The class MemStoreFlusher, method start.
synchronized void start(UncaughtExceptionHandler eh) {
  ThreadFactory flusherThreadFactory = Threads.newDaemonThreadFactory(
      server.getServerName().toShortString() + "-MemStoreFlusher", eh);
  for (int i = 0; i < flushHandlers.length; i++) {
    flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i);
    flusherThreadFactory.newThread(flushHandlers[i]);
    flushHandlers[i].start();
  }
}
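One detail worth noticing: as quoted, the Thread returned by flusherThreadFactory.newThread(flushHandlers[i]) is discarded, and the handler is started directly via flushHandlers[i].start(), so the factory's name, daemon flag, and uncaught-exception handler never reach the thread that actually runs (FlushHandler evidently supplies its own thread). For reference, here is a minimal sketch of what a naming daemon-thread factory in the spirit of Threads.newDaemonThreadFactory looks like; this is an illustration under that assumption, not HBase's implementation.

import java.lang.Thread.UncaughtExceptionHandler;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: names each thread "prefix.N", marks it daemon, and attaches the
// uncaught-exception handler, mirroring what the HBase helper is used for.
final class DaemonThreadFactory implements ThreadFactory {
  private final String prefix;
  private final UncaughtExceptionHandler handler;
  private final AtomicInteger count = new AtomicInteger(1);

  DaemonThreadFactory(String prefix, UncaughtExceptionHandler handler) {
    this.prefix = prefix;
    this.handler = handler;
  }

  @Override
  public Thread newThread(Runnable r) {
    Thread t = new Thread(r, prefix + "." + count.getAndIncrement());
    t.setDaemon(true); // do not block JVM shutdown
    if (handler != null) {
      t.setUncaughtExceptionHandler(handler);
    }
    return t;
  }
}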
Use of java.util.concurrent.ThreadFactory in project hbase by apache.
The class ModifyRegionUtils, method getRegionOpenAndInitThreadPool.
/*
 * Used by createRegions() to get the thread pool executor based on the
 * "hbase.hregion.open.and.init.threads.max" property.
 */
static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf,
    final String threadNamePrefix, int regionNumber) {
  int maxThreads = Math.min(regionNumber,
      conf.getInt("hbase.hregion.open.and.init.threads.max", 10));
  ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.getBoundedCachedThreadPool(
      maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() {
        private int count = 1;

        @Override
        public Thread newThread(Runnable r) {
          return new Thread(r, threadNamePrefix + "-" + count++);
        }
      });
  return regionOpenAndInitThreadPool;
}
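One thing to watch in the anonymous factory above: count is a plain int mutated with count++, so if the pool ever creates threads from multiple threads concurrently, two workers could receive the same number (this affects names only, not correctness of the pool). A small sketch of the same naming scheme using an AtomicInteger; only the threadNamePrefix parameter from the method above is assumed.

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: same "prefix-N" naming, but duplicate-free no matter how
// concurrently newThread is invoked.
static ThreadFactory namedThreadFactory(final String threadNamePrefix) {
  final AtomicInteger count = new AtomicInteger(1);
  return new ThreadFactory() {
    @Override
    public Thread newThread(Runnable r) {
      return new Thread(r, threadNamePrefix + "-" + count.getAndIncrement());
    }
  };
}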
Use of java.util.concurrent.ThreadFactory in project hadoop by apache.
The class NMClientAsyncImpl, method serviceStart.
@Override
protected void serviceStart() throws Exception {
  client.start();
  ThreadFactory tf = new ThreadFactoryBuilder()
      .setNameFormat(this.getClass().getName() + " #%d")
      .setDaemon(true)
      .build();
  // Start with a default core-pool size and change it dynamically.
  int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
  threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1,
      TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
  eventDispatcherThread = new Thread() {
    @Override
    public void run() {
      ContainerEvent event = null;
      Set<String> allNodes = new HashSet<String>();
      while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
        try {
          event = events.take();
        } catch (InterruptedException e) {
          if (!stopped.get()) {
            LOG.error("Returning, thread interrupted", e);
          }
          return;
        }
        allNodes.add(event.getNodeId().toString());
        int threadPoolSize = threadPool.getCorePoolSize();
        // We can increase the pool size only if we haven't reached the maximum
        // limit yet.
        if (threadPoolSize != maxThreadPoolSize) {
          // nodeNum counts the nodes where containers will run at *this* point
          // of time. This is *not* the cluster size and doesn't need to be.
          int nodeNum = allNodes.size();
          int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);
          if (threadPoolSize < idealThreadPoolSize) {
            // Bump up the pool size to idealThreadPoolSize +
            // INITIAL_THREAD_POOL_SIZE; the latter is just a buffer so we are
            // not always increasing the pool size.
            int newThreadPoolSize = Math.min(maxThreadPoolSize,
                idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
            LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize
                + " as the number of nodes to talk to is " + nodeNum);
            threadPool.setCorePoolSize(newThreadPoolSize);
          }
        }
        // The events from the queue are handled in parallel by a thread pool.
        threadPool.execute(getContainerEventProcessor(event));
        // TODO: Group launching of multiple containers to a single
        // NodeManager into a single connection
      }
    }
  };
  eventDispatcherThread.setName("Container Event Dispatcher");
  eventDispatcherThread.setDaemon(false);
  eventDispatcherThread.start();
  super.serviceStart();
}
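The sizing logic above leans on a ThreadPoolExecutor detail: with an unbounded LinkedBlockingQueue, the pool never grows past its core size on its own, because extra tasks queue up instead of triggering new threads toward the maximum. That is why the dispatcher resizes the pool explicitly with setCorePoolSize. A minimal sketch of the pattern (the pool name and sizes here are illustrative, not Hadoop's):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

// With an unbounded queue, only corePoolSize governs growth, so the
// maximumPoolSize of Integer.MAX_VALUE is effectively never reached.
ThreadPoolExecutor pool = new ThreadPoolExecutor(
    2, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
    new LinkedBlockingQueue<Runnable>(),
    new ThreadFactoryBuilder().setNameFormat("demo #%d").setDaemon(true).build());

// Later, when more concurrency is warranted, grow the core size; new threads
// are started as queued or incoming work allows.
pool.setCorePoolSize(8);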
Use of java.util.concurrent.ThreadFactory in project hadoop by apache.
The class CommitterEventHandler, method serviceStart.
@Override
protected void serviceStart() throws Exception {
  ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder()
      .setNameFormat("CommitterEvent Processor #%d");
  if (jobClassLoader != null) {
    // If the job classloader is enabled, we need to use the job classloader
    // as the thread context classloader (TCCL) of these threads in case the
    // committer needs to load another class via TCCL.
    ThreadFactory backingTf = new ThreadFactory() {
      @Override
      public Thread newThread(Runnable r) {
        Thread thread = new Thread(r);
        thread.setContextClassLoader(jobClassLoader);
        return thread;
      }
    };
    tfBuilder.setThreadFactory(backingTf);
  }
  ThreadFactory tf = tfBuilder.build();
  launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS,
      new LinkedBlockingQueue<Runnable>(), tf);
  eventHandlingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      CommitterEvent event = null;
      while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
        try {
          event = eventQueue.take();
        } catch (InterruptedException e) {
          if (!stopped.get()) {
            LOG.error("Returning, interrupted : " + e);
          }
          return;
        }
        // The events from the queue are handled in parallel
        // using a thread pool.
        launcherPool.execute(new EventProcessor(event));
      }
    }
  });
  eventHandlingThread.setName("CommitterEvent Handler");
  eventHandlingThread.start();
  super.serviceStart();
}
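The composition above relies on a Guava ThreadFactoryBuilder behavior: when a backing factory is supplied via setThreadFactory, the built factory delegates thread creation to it and then applies the configured name format, so the context classloader set by the backing factory survives. A standalone sketch of the same pattern; the classLoader parameter and the "Worker #%d" name are stand-ins, not Hadoop's values.

import java.util.concurrent.ThreadFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

// Sketch: threads created by this factory resolve classes through the given
// loader, while the builder layers naming on top of the backing factory.
static ThreadFactory tcclFactory(final ClassLoader classLoader) {
  ThreadFactory backing = new ThreadFactory() {
    @Override
    public Thread newThread(Runnable r) {
      Thread t = new Thread(r);
      t.setContextClassLoader(classLoader); // TCCL pinned before the thread starts
      return t;
    }
  };
  return new ThreadFactoryBuilder()
      .setNameFormat("Worker #%d")
      .setThreadFactory(backing)
      .build();
}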
Use of java.util.concurrent.ThreadFactory in project Mycat-Server by MyCATApache.
The class MycatServer, method startup.
public void startup() throws IOException {
  SystemConfig system = config.getSystem();
  int processorCount = system.getProcessors();
  // server startup
  LOGGER.info(NAME + " is ready to startup ...");
  String inf = "Startup processors ...,total processors:" + system.getProcessors()
      + ",aio thread pool size:" + system.getProcessorExecutor()
      + " \r\n each process allocated socket buffer pool " + " bytes ,a page size:"
      + system.getBufferPoolPageSize()
      + " a page's chunk number(PageSize/ChunkSize) is:"
      + (system.getBufferPoolPageSize() / system.getBufferPoolChunkSize())
      + " buffer page's number is:" + system.getBufferPoolPageNumber();
  LOGGER.info(inf);
  LOGGER.info("sysconfig params:" + system.toString());
  // startup manager
  ManagerConnectionFactory mf = new ManagerConnectionFactory();
  ServerConnectionFactory sf = new ServerConnectionFactory();
  SocketAcceptor manager = null;
  SocketAcceptor server = null;
  aio = (system.getUsingAIO() == 1);
  // startup processors
  int threadPoolSize = system.getProcessorExecutor();
  processors = new NIOProcessor[processorCount];
  // a page size
  int bufferPoolPageSize = system.getBufferPoolPageSize();
  // total page number
  short bufferPoolPageNumber = system.getBufferPoolPageNumber();
  // minimum allocation unit
  short bufferPoolChunkSize = system.getBufferPoolChunkSize();
  int socketBufferLocalPercent = system.getProcessorBufferLocalPercent();
  int bufferPoolType = system.getProcessorBufferPoolType();
  switch (bufferPoolType) {
    case 0:
      bufferPool = new DirectByteBufferPool(bufferPoolPageSize, bufferPoolChunkSize,
          bufferPoolPageNumber, system.getFrontSocketSoRcvbuf());
      totalNetWorkBufferSize = bufferPoolPageSize * bufferPoolPageNumber;
      break;
    case 1:
      /**
       * TODO, per the reference guide:
       *
       * A ByteBufferArena is made up of 6 ByteBufferLists, which have a mechanism
       * for reducing memory fragmentation. Each ByteBufferList consists of multiple
       * ByteBufferChunks and likewise has its own anti-fragmentation mechanism.
       * Each ByteBufferChunk consists of multiple pages, with a balanced binary
       * tree tracking usage state, which keeps the accounting flexible.
       * The configured page size is the buffer length of each ByteBufferChunk in
       * each ByteBufferList of the ByteBufferArena; bufferPoolChunkSize is the
       * length of each page within a ByteBufferChunk; bufferPoolPageNumber is the
       * number of ByteBufferChunks in each ByteBufferList.
       */
      // NOTE: the arena-backed pool construction for this case is not shown in
      // this excerpt.
      totalNetWorkBufferSize = 6 * bufferPoolPageSize * bufferPoolPageNumber;
      break;
    default:
      bufferPool = new DirectByteBufferPool(bufferPoolPageSize, bufferPoolChunkSize,
          bufferPoolPageNumber, system.getFrontSocketSoRcvbuf());
      totalNetWorkBufferSize = bufferPoolPageSize * bufferPoolPageNumber;
  }
  /**
   * Off-heap initialization for Merge/Order/Group/Limit
   */
  if (system.getUseOffHeapForMerge() == 1) {
    try {
      myCatMemory = new MyCatMemory(system, totalNetWorkBufferSize);
    } catch (NoSuchFieldException e) {
      LOGGER.error("NoSuchFieldException", e);
    } catch (IllegalAccessException e) {
      LOGGER.error("Error", e);
    }
  }
  businessExecutor = ExecutorUtil.create("BusinessExecutor", threadPoolSize);
  timerExecutor = ExecutorUtil.create("Timer", system.getTimerExecutor());
  listeningExecutorService = MoreExecutors.listeningDecorator(businessExecutor);
  for (int i = 0; i < processors.length; i++) {
    processors[i] = new NIOProcessor("Processor" + i, bufferPool, businessExecutor);
  }
  if (aio) {
    LOGGER.info("using aio network handler ");
    asyncChannelGroups = new AsynchronousChannelGroup[processorCount];
    // startup connector
    connector = new AIOConnector();
    for (int i = 0; i < processors.length; i++) {
      asyncChannelGroups[i] = AsynchronousChannelGroup.withFixedThreadPool(processorCount,
          new ThreadFactory() {
            private int inx = 1;

            @Override
            public Thread newThread(Runnable r) {
              Thread th = new Thread(r);
              // TODO
              th.setName(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "AIO" + (inx++));
              LOGGER.info("created new AIO thread " + th.getName());
              return th;
            }
          });
    }
    manager = new AIOAcceptor(NAME + "Manager", system.getBindIp(),
        system.getManagerPort(), mf, this.asyncChannelGroups[0]);
    // startup server
    server = new AIOAcceptor(NAME + "Server", system.getBindIp(),
        system.getServerPort(), sf, this.asyncChannelGroups[0]);
  } else {
    LOGGER.info("using nio network handler ");
    NIOReactorPool reactorPool = new NIOReactorPool(
        DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOREACTOR", processors.length);
    connector = new NIOConnector(
        DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOConnector", reactorPool);
    ((NIOConnector) connector).start();
    manager = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + "Manager",
        system.getBindIp(), system.getManagerPort(), mf, reactorPool);
    server = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + "Server",
        system.getBindIp(), system.getServerPort(), sf, reactorPool);
  }
  // manager start
  manager.start();
  LOGGER.info(manager.getName() + " is started and listening on " + manager.getPort());
  server.start();
  // server started
  LOGGER.info(server.getName() + " is started and listening on " + server.getPort());
  LOGGER.info("===============================================");
  // init datahost
  Map<String, PhysicalDBPool> dataHosts = config.getDataHosts();
  LOGGER.info("Initialize dataHost ...");
  for (PhysicalDBPool node : dataHosts.values()) {
    String index = dnIndexProperties.getProperty(node.getHostName(), "0");
    if (!"0".equals(index)) {
      LOGGER.info("init datahost: " + node.getHostName()
          + " to use datasource index:" + index);
    }
    node.init(Integer.parseInt(index));
    node.startHeartbeat();
  }
  long dataNodeIdleCheckPeriod = system.getDataNodeIdleCheckPeriod();
  heartbeatScheduler.scheduleAtFixedRate(updateTime(), 0L, TIME_UPDATE_PERIOD,
      TimeUnit.MILLISECONDS);
  heartbeatScheduler.scheduleAtFixedRate(processorCheck(), 0L,
      system.getProcessorCheckPeriod(), TimeUnit.MILLISECONDS);
  heartbeatScheduler.scheduleAtFixedRate(dataNodeConHeartBeatCheck(dataNodeIdleCheckPeriod),
      0L, dataNodeIdleCheckPeriod, TimeUnit.MILLISECONDS);
  heartbeatScheduler.scheduleAtFixedRate(dataNodeHeartbeat(), 0L,
      system.getDataNodeHeartbeatPeriod(), TimeUnit.MILLISECONDS);
  heartbeatScheduler.scheduleAtFixedRate(dataSourceOldConsClear(), 0L,
      DEFAULT_OLD_CONNECTION_CLEAR_PERIOD, TimeUnit.MILLISECONDS);
  scheduler.schedule(catletClassClear(), 30000, TimeUnit.MILLISECONDS);
  if (system.getCheckTableConsistency() == 1) {
    scheduler.scheduleAtFixedRate(tableStructureCheck(), 0L,
        system.getCheckTableConsistencyPeriod(), TimeUnit.MILLISECONDS);
  }
  if (system.getUseSqlStat() == 1) {
    scheduler.scheduleAtFixedRate(recycleSqlStat(), 0L,
        DEFAULT_SQL_STAT_RECYCLE_PERIOD, TimeUnit.MILLISECONDS);
  }
  if (system.getUseGlobleTableCheck() == 1) {
    // whether global-table consistency checking is enabled
    scheduler.scheduleAtFixedRate(glableTableConsistencyCheck(), 0L,
        system.getGlableTableCheckPeriod(), TimeUnit.MILLISECONDS);
  }
  // periodically clear the big-result-set ranking map, which drives the rejection policy
  scheduler.scheduleAtFixedRate(resultSetMapClear(), 0L,
      system.getClearBigSqLResultSetMapMs(), TimeUnit.MILLISECONDS);
  RouteStrategyFactory.init();
  // new Thread(tableStructureCheck()).start();
  // XA: init recovery log
  LOGGER.info("===============================================");
  LOGGER.info("Perform XA recovery log ...");
  performXARecoveryLog();
  if (isUseZkSwitch()) {
    // on first startup, if the dnindex on ZK is empty, push the local
    // initialization up to ZK
    initZkDnindex();
  }
  initRuleData();
  startup.set(true);
}
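The AIO branch above hands AsynchronousChannelGroup.withFixedThreadPool a factory that prefixes each worker's name with DirectByteBufferPool.LOCAL_BUF_THREAD_PREX. As in the earlier examples, the inx field is a plain int; an AtomicInteger makes the numbering safe regardless of how the group invokes the factory. A compact sketch of the same setup, where the prefix parameter stands in for the Mycat constant:

import java.io.IOException;
import java.nio.channels.AsynchronousChannelGroup;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: fixed-size AIO channel group whose worker threads carry a
// recognizable "<prefix>AIO<N>" name.
static AsynchronousChannelGroup newAioGroup(final String prefix, int threads)
    throws IOException {
  final AtomicInteger inx = new AtomicInteger(1);
  return AsynchronousChannelGroup.withFixedThreadPool(threads, new ThreadFactory() {
    @Override
    public Thread newThread(Runnable r) {
      return new Thread(r, prefix + "AIO" + inx.getAndIncrement());
    }
  });
}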