Example 41 with ThreadFactory

use of java.util.concurrent.ThreadFactory in project hbase by apache.

the class MemStoreFlusher method start.

synchronized void start(UncaughtExceptionHandler eh) {
    ThreadFactory flusherThreadFactory = Threads.newDaemonThreadFactory(server.getServerName().toShortString() + "-MemStoreFlusher", eh);
    for (int i = 0; i < flushHandlers.length; i++) {
        flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i);
        flusherThreadFactory.newThread(flushHandlers[i]);
        flushHandlers[i].start();
    }
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory)
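
For comparison, the same effect can be had with the JDK alone. The sketch below is a minimal stand-in for what a daemon-thread factory such as Threads.newDaemonThreadFactory typically does; the class name DaemonThreadFactory and the naming scheme are illustrative, not taken from the HBase codebase.

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Minimal daemon-thread factory: shared name prefix, daemon flag, optional
// uncaught-exception handler (hypothetical stand-in, not the HBase helper).
public class DaemonThreadFactory implements ThreadFactory {

    private final String prefix;
    private final Thread.UncaughtExceptionHandler handler;
    private final AtomicInteger counter = new AtomicInteger(1);

    public DaemonThreadFactory(String prefix, Thread.UncaughtExceptionHandler handler) {
        this.prefix = prefix;
        this.handler = handler;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, prefix + "." + counter.getAndIncrement());
        t.setDaemon(true); // do not keep the JVM alive on shutdown
        if (handler != null) {
            t.setUncaughtExceptionHandler(handler);
        }
        return t;
    }
}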

Example 42 with ThreadFactory

use of java.util.concurrent.ThreadFactory in project hbase by apache.

the class ModifyRegionUtils method getRegionOpenAndInitThreadPool.

/*
   * used by createRegions() to get the thread pool executor based on the
   * "hbase.hregion.open.and.init.threads.max" property.
   */
static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf, final String threadNamePrefix, int regionNumber) {
    int maxThreads = Math.min(regionNumber, conf.getInt("hbase.hregion.open.and.init.threads.max", 10));
    ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() {

        private int count = 1;

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, threadNamePrefix + "-" + count++);
        }
    });
    return regionOpenAndInitThreadPool;
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
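
Threads.getBoundedCachedThreadPool is HBase-internal, but a rough JDK-only approximation of the same setup is sketched below. NamedPools, boundedPool, and the use of AtomicInteger for thread numbering are illustrative; the original anonymous factory increments a non-volatile int, which can yield duplicate names if threads are created concurrently, and the AtomicInteger avoids that.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical JDK-only equivalent of a bounded "cached" pool with named threads.
public final class NamedPools {

    public static ThreadPoolExecutor boundedPool(String namePrefix, int maxThreads) {
        AtomicInteger count = new AtomicInteger(1);
        ThreadFactory tf = r -> new Thread(r, namePrefix + "-" + count.getAndIncrement());
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                maxThreads, maxThreads,        // bounded at maxThreads workers
                30L, TimeUnit.SECONDS,         // keep-alive, as in the call above
                new LinkedBlockingQueue<>(), tf);
        pool.allowCoreThreadTimeOut(true);     // let idle threads expire, like a cached pool
        return pool;
    }
}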

Example 43 with ThreadFactory

use of java.util.concurrent.ThreadFactory in project hadoop by apache.

the class NMClientAsyncImpl method serviceStart.

@Override
protected void serviceStart() throws Exception {
    client.start();
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(this.getClass().getName() + " #%d").setDaemon(true).build();
    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventDispatcherThread = new Thread() {

        @Override
        public void run() {
            ContainerEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = events.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, thread interrupted", e);
                    }
                    return;
                }
                allNodes.add(event.getNodeId().toString());
                int threadPoolSize = threadPool.getCorePoolSize();
                // We can increase the pool size only if we haven't reached the maximum
                // limit yet.
                if (threadPoolSize != maxThreadPoolSize) {
                    // nodeNum is the number of nodes where containers will run at *this*
                    // point of time. This is *not* the cluster size and doesn't need to be.
                    int nodeNum = allNodes.size();
                    int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);
                    if (threadPoolSize < idealThreadPoolSize) {
                        // Bump up the pool size to idealThreadPoolSize +
                        // INITIAL_POOL_SIZE, the later is just a buffer so we are not
                        // always increasing the pool-size
                        int newThreadPoolSize = Math.min(maxThreadPoolSize, idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
                        LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize + " as the number of nodes to talk to is " + nodeNum);
                        threadPool.setCorePoolSize(newThreadPoolSize);
                    }
                }
                // the events from the queue are handled in parallel with a thread
                // pool
                threadPool.execute(getContainerEventProcessor(event));
            // TODO: Group launching of multiple containers to a single
            // NodeManager into a single connection
            }
        }
    };
    eventDispatcherThread.setName("Container  Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();
    super.serviceStart();
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) Set(java.util.Set) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
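
The notable detail in this example is that the dispatcher grows the executor's core pool size at runtime with setCorePoolSize(): because the queue is an unbounded LinkedBlockingQueue, a ThreadPoolExecutor never spawns more than corePoolSize threads on its own. A stripped-down sketch of that idea follows; GrowingPool and its members are illustrative names, not Hadoop API.

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: a pool whose core size tracks the number of distinct
// targets seen so far, capped at a maximum, as NMClientAsyncImpl does per node.
public final class GrowingPool {

    private final ThreadPoolExecutor pool;
    private final int maxPoolSize;

    public GrowingPool(int initialSize, int maxPoolSize) {
        this.maxPoolSize = maxPoolSize;
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("growing-pool #%d") // %d is a per-factory counter
                .setDaemon(true)
                .build();
        // With an unbounded queue only corePoolSize matters; it starts small
        // and is raised on demand in submit().
        this.pool = new ThreadPoolExecutor(initialSize, Integer.MAX_VALUE,
                1, TimeUnit.HOURS, new LinkedBlockingQueue<>(), tf);
    }

    public void submit(Runnable task, int distinctTargets) {
        int ideal = Math.min(maxPoolSize, distinctTargets);
        if (pool.getCorePoolSize() < ideal) {
            pool.setCorePoolSize(ideal);       // grow toward the observed demand
        }
        pool.execute(task);
    }
}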

Example 44 with ThreadFactory

use of java.util.concurrent.ThreadFactory in project hadoop by apache.

the class CommitterEventHandler method serviceStart.

@Override
protected void serviceStart() throws Exception {
    ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder().setNameFormat("CommitterEvent Processor #%d");
    if (jobClassLoader != null) {
        // if the job classloader is enabled, we need to use the job classloader
        // as the thread context classloader (TCCL) of these threads in case the
        // committer needs to load another class via TCCL
        ThreadFactory backingTf = new ThreadFactory() {

            @Override
            public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(jobClassLoader);
                return thread;
            }
        };
        tfBuilder.setThreadFactory(backingTf);
    }
    ThreadFactory tf = tfBuilder.build();
    launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread(new Runnable() {

        @Override
        public void run() {
            CommitterEvent event = null;
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(new EventProcessor(event));
            }
        }
    });
    eventHandlingThread.setName("CommitterEvent Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) HadoopThreadPoolExecutor(org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor)
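
The pattern to take away here is decorating a backing ThreadFactory: Guava's ThreadFactoryBuilder applies the name format (and any daemon or priority settings) on top of threads produced by the factory passed to setThreadFactory, which this example uses to pin the thread-context classloader. A compact, hedged version follows; ClassLoaderThreadFactories and classLoaderFactory are illustrative names.

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.ThreadFactory;

public final class ClassLoaderThreadFactories {

    // Build a named factory whose threads use the given classloader as their
    // thread-context classloader (TCCL). Hypothetical helper, mirroring the
    // CommitterEventHandler pattern above.
    public static ThreadFactory classLoaderFactory(String nameFormat, ClassLoader loader) {
        ThreadFactory backing = r -> {
            Thread t = new Thread(r);
            t.setContextClassLoader(loader);
            return t;
        };
        return new ThreadFactoryBuilder()
                .setNameFormat(nameFormat)   // e.g. "CommitterEvent Processor #%d"
                .setThreadFactory(backing)   // names are applied on top of these threads
                .build();
    }
}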

Example 45 with ThreadFactory

use of java.util.concurrent.ThreadFactory in project Mycat-Server by MyCATApache.

the class MycatServer method startup.

public void startup() throws IOException {
    SystemConfig system = config.getSystem();
    int processorCount = system.getProcessors();
    // server startup
    LOGGER.info(NAME + " is ready to startup ...");
    String inf = "Startup processors ...,total processors:" + system.getProcessors() + ",aio thread pool size:" + system.getProcessorExecutor() + "    \r\n each process allocated socket buffer pool " + " bytes ,a page size:" + system.getBufferPoolPageSize() + "  a page's chunk number(PageSize/ChunkSize) is:" + (system.getBufferPoolPageSize() / system.getBufferPoolChunkSize()) + "  buffer page's number is:" + system.getBufferPoolPageNumber();
    LOGGER.info(inf);
    LOGGER.info("sysconfig params:" + system.toString());
    // startup manager
    ManagerConnectionFactory mf = new ManagerConnectionFactory();
    ServerConnectionFactory sf = new ServerConnectionFactory();
    SocketAcceptor manager = null;
    SocketAcceptor server = null;
    aio = (system.getUsingAIO() == 1);
    // startup processors
    int threadPoolSize = system.getProcessorExecutor();
    processors = new NIOProcessor[processorCount];
    // a page size
    int bufferPoolPageSize = system.getBufferPoolPageSize();
    // total page number 
    short bufferPoolPageNumber = system.getBufferPoolPageNumber();
    //minimum allocation unit
    short bufferPoolChunkSize = system.getBufferPoolChunkSize();
    int socketBufferLocalPercent = system.getProcessorBufferLocalPercent();
    int bufferPoolType = system.getProcessorBufferPoolType();
    switch(bufferPoolType) {
        case 0:
            bufferPool = new DirectByteBufferPool(bufferPoolPageSize, bufferPoolChunkSize, bufferPoolPageNumber, system.getFrontSocketSoRcvbuf());
            totalNetWorkBufferSize = bufferPoolPageSize * bufferPoolPageNumber;
            break;
        case 1:
            /**
             * TODO: update to match the reference guide:
             *
             * A ByteBufferArena is made up of 6 ByteBufferLists; these six lists have a mechanism for reducing memory fragmentation.
             * Each ByteBufferList is made up of multiple ByteBufferChunks and also has its own fragmentation-reduction mechanism.
             * Each ByteBufferChunk is made up of multiple pages; a balanced binary tree tracks memory-usage state, keeping allocation flexible.
             * The configured page size is the buffer length of each ByteBufferChunk in each ByteBufferList of the ByteBufferArena.
             * bufferPoolChunkSize is the length of each page within a ByteBufferChunk.
             * bufferPoolPageNumber is the number of ByteBufferChunks in each ByteBufferList.
             */
            totalNetWorkBufferSize = 6 * bufferPoolPageSize * bufferPoolPageNumber;
            break;
        default:
            bufferPool = new DirectByteBufferPool(bufferPoolPageSize, bufferPoolChunkSize, bufferPoolPageNumber, system.getFrontSocketSoRcvbuf());
            totalNetWorkBufferSize = bufferPoolPageSize * bufferPoolPageNumber;
    }
    /**
     * Off-heap initialization for Merge/Order/Group/Limit
     */
    if (system.getUseOffHeapForMerge() == 1) {
        try {
            myCatMemory = new MyCatMemory(system, totalNetWorkBufferSize);
        } catch (NoSuchFieldException e) {
            LOGGER.error("NoSuchFieldException", e);
        } catch (IllegalAccessException e) {
            LOGGER.error("Error", e);
        }
    }
    businessExecutor = ExecutorUtil.create("BusinessExecutor", threadPoolSize);
    timerExecutor = ExecutorUtil.create("Timer", system.getTimerExecutor());
    listeningExecutorService = MoreExecutors.listeningDecorator(businessExecutor);
    for (int i = 0; i < processors.length; i++) {
        processors[i] = new NIOProcessor("Processor" + i, bufferPool, businessExecutor);
    }
    if (aio) {
        LOGGER.info("using aio network handler ");
        asyncChannelGroups = new AsynchronousChannelGroup[processorCount];
        // startup connector
        connector = new AIOConnector();
        for (int i = 0; i < processors.length; i++) {
            asyncChannelGroups[i] = AsynchronousChannelGroup.withFixedThreadPool(processorCount, new ThreadFactory() {

                private int inx = 1;

                @Override
                public Thread newThread(Runnable r) {
                    Thread th = new Thread(r);
                    //TODO
                    th.setName(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "AIO" + (inx++));
                    LOGGER.info("created new AIO thread " + th.getName());
                    return th;
                }
            });
        }
        manager = new AIOAcceptor(NAME + "Manager", system.getBindIp(), system.getManagerPort(), mf, this.asyncChannelGroups[0]);
        // startup server
        server = new AIOAcceptor(NAME + "Server", system.getBindIp(), system.getServerPort(), sf, this.asyncChannelGroups[0]);
    } else {
        LOGGER.info("using nio network handler ");
        NIOReactorPool reactorPool = new NIOReactorPool(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOREACTOR", processors.length);
        connector = new NIOConnector(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOConnector", reactorPool);
        ((NIOConnector) connector).start();
        manager = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + "Manager", system.getBindIp(), system.getManagerPort(), mf, reactorPool);
        server = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + "Server", system.getBindIp(), system.getServerPort(), sf, reactorPool);
    }
    // manager start
    manager.start();
    LOGGER.info(manager.getName() + " is started and listening on " + manager.getPort());
    server.start();
    // server started
    LOGGER.info(server.getName() + " is started and listening on " + server.getPort());
    LOGGER.info("===============================================");
    // init datahost
    Map<String, PhysicalDBPool> dataHosts = config.getDataHosts();
    LOGGER.info("Initialize dataHost ...");
    for (PhysicalDBPool node : dataHosts.values()) {
        String index = dnIndexProperties.getProperty(node.getHostName(), "0");
        if (!"0".equals(index)) {
            LOGGER.info("init datahost: " + node.getHostName() + "  to use datasource index:" + index);
        }
        node.init(Integer.parseInt(index));
        node.startHeartbeat();
    }
    long dataNodeIldeCheckPeriod = system.getDataNodeIdleCheckPeriod();
    heartbeatScheduler.scheduleAtFixedRate(updateTime(), 0L, TIME_UPDATE_PERIOD, TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(processorCheck(), 0L, system.getProcessorCheckPeriod(), TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(dataNodeConHeartBeatCheck(dataNodeIldeCheckPeriod), 0L, dataNodeIldeCheckPeriod, TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(dataNodeHeartbeat(), 0L, system.getDataNodeHeartbeatPeriod(), TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(dataSourceOldConsClear(), 0L, DEFAULT_OLD_CONNECTION_CLEAR_PERIOD, TimeUnit.MILLISECONDS);
    scheduler.schedule(catletClassClear(), 30000, TimeUnit.MILLISECONDS);
    if (system.getCheckTableConsistency() == 1) {
        scheduler.scheduleAtFixedRate(tableStructureCheck(), 0L, system.getCheckTableConsistencyPeriod(), TimeUnit.MILLISECONDS);
    }
    if (system.getUseSqlStat() == 1) {
        scheduler.scheduleAtFixedRate(recycleSqlStat(), 0L, DEFAULT_SQL_STAT_RECYCLE_PERIOD, TimeUnit.MILLISECONDS);
    }
    if (system.getUseGlobleTableCheck() == 1) {
        // whether the global-table consistency check is enabled
        scheduler.scheduleAtFixedRate(glableTableConsistencyCheck(), 0L, system.getGlableTableCheckPeriod(), TimeUnit.MILLISECONDS);
    }
    // periodically clear the big result-set ranking map and enforce its rejection policy
    scheduler.scheduleAtFixedRate(resultSetMapClear(), 0L, system.getClearBigSqLResultSetMapMs(), TimeUnit.MILLISECONDS);
    RouteStrategyFactory.init();
    //        new Thread(tableStructureCheck()).start();
    //XA Init recovery Log
    LOGGER.info("===============================================");
    LOGGER.info("Perform XA recovery log ...");
    performXARecoveryLog();
    if (isUseZkSwitch()) {
        // on first startup, if dnindex on ZK is empty, initialize it on ZK from the local copy
        initZkDnindex();
    }
    initRuleData();
    startup.set(true);
}
Also used : AIOConnector(io.mycat.net.AIOConnector) ThreadFactory(java.util.concurrent.ThreadFactory) SystemConfig(io.mycat.config.model.SystemConfig) NIOReactorPool(io.mycat.net.NIOReactorPool) ManagerConnectionFactory(io.mycat.manager.ManagerConnectionFactory) PhysicalDBPool(io.mycat.backend.datasource.PhysicalDBPool) NIOProcessor(io.mycat.net.NIOProcessor) ServerConnectionFactory(io.mycat.server.ServerConnectionFactory) NIOConnector(io.mycat.net.NIOConnector) SocketAcceptor(io.mycat.net.SocketAcceptor) DirectByteBufferPool(io.mycat.buffer.DirectByteBufferPool) NIOAcceptor(io.mycat.net.NIOAcceptor) AIOAcceptor(io.mycat.net.AIOAcceptor) MyCatMemory(io.mycat.memory.MyCatMemory)
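
In this example the ThreadFactory exists mainly to give the AIO worker threads recognizable, per-group names, apparently so the buffer pool can identify its local threads by the DirectByteBufferPool.LOCAL_BUF_THREAD_PREX prefix. A self-contained JDK-only sketch of the same AsynchronousChannelGroup call is below; AioGroups, namedGroup, and the sample prefix are illustrative, not taken from Mycat's configuration.

import java.io.IOException;
import java.nio.channels.AsynchronousChannelGroup;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical helper: an AIO channel group whose worker threads carry a
// recognizable name prefix, as MycatServer.startup() sets up per processor.
public final class AioGroups {

    public static AsynchronousChannelGroup namedGroup(String prefix, int threads)
            throws IOException {
        AtomicInteger index = new AtomicInteger(1);
        ThreadFactory tf = r -> new Thread(r, prefix + "AIO" + index.getAndIncrement());
        return AsynchronousChannelGroup.withFixedThreadPool(threads, tf);
    }
}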

Aggregations

ThreadFactory (java.util.concurrent.ThreadFactory): 250
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 47
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 46
ExecutorService (java.util.concurrent.ExecutorService): 45
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 35
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 21
ScheduledThreadPoolExecutor (java.util.concurrent.ScheduledThreadPoolExecutor): 19
Test (org.junit.Test): 17
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 16
Future (java.util.concurrent.Future): 15
NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup): 13
ArrayList (java.util.ArrayList): 13
LoggingThreadGroup (org.apache.geode.internal.logging.LoggingThreadGroup): 12
IOException (java.io.IOException): 10
AtomicLong (java.util.concurrent.atomic.AtomicLong): 10
ExecutionException (java.util.concurrent.ExecutionException): 9
Executor (java.util.concurrent.Executor): 9
ChannelFuture (io.netty.channel.ChannelFuture): 8
DefaultThreadFactory (io.netty.util.concurrent.DefaultThreadFactory): 8
SynchronousQueue (java.util.concurrent.SynchronousQueue): 7