
Example 1 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.

From the class FSNamesystem, method startActiveServices:

/**
   * Start services required in active state
   * @throws IOException
   */
void startActiveServices() throws IOException {
    startingActiveService = true;
    LOG.info("Starting services required for active state");
    writeLock();
    try {
        FSEditLog editLog = getFSImage().getEditLog();
        if (!editLog.isOpenForWrite()) {
            // During startup, we're already open for write during initialization.
            editLog.initJournalsForWrite();
            // May need to recover
            editLog.recoverUnclosedStreams();
            LOG.info("Catching up to latest edits from old active before " + "taking over writer role in edits logs");
            editLogTailer.catchupDuringFailover();
            blockManager.setPostponeBlocksFromFuture(false);
            blockManager.getDatanodeManager().markAllDatanodesStale();
            blockManager.clearQueues();
            blockManager.processAllPendingDNMessages();
            // Only need to re-process the queue if we are not in safe mode.
            if (!isInSafeMode()) {
                LOG.info("Reprocessing replication and invalidation queues");
                blockManager.initializeReplQueues();
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("NameNode metadata after re-processing " + "replication and invalidation queues during failover:\n" + metaSaveAsString());
            }
            long nextTxId = getFSImage().getLastAppliedTxId() + 1;
            LOG.info("Will take over writing edit logs at txnid " + nextTxId);
            editLog.setNextTxId(nextTxId);
            getFSImage().editLog.openForWrite(getEffectiveLayoutVersion());
        }
        // Initialize the quota.
        dir.updateCountForQuota();
        // Enable quota checks.
        dir.enableQuotaChecks();
        if (haEnabled) {
            // Renew all of the leases before becoming active.
            // This is because, while we were in standby mode,
            // the leases weren't getting renewed on this NN.
            // Give them all a fresh start here.
            leaseManager.renewAllLeases();
        }
        leaseManager.startMonitor();
        startSecretManagerIfNecessary();
        // ResourceMonitor required only at the Active NN. See HDFS-2914
        this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
        nnrmthread.start();
        nnEditLogRoller = new Daemon(new NameNodeEditLogRoller(editLogRollerThreshold, editLogRollerInterval));
        nnEditLogRoller.start();
        if (lazyPersistFileScrubIntervalSec > 0) {
            lazyPersistFileScrubber = new Daemon(new LazyPersistFileScrubber(lazyPersistFileScrubIntervalSec));
            lazyPersistFileScrubber.start();
        } else {
            LOG.warn("Lazy persist file scrubber is disabled," + " configured scrub interval is zero.");
        }
        cacheManager.startMonitorThread();
        blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
        if (provider != null) {
            edekCacheLoader = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Warm Up EDEK Cache Thread #%d").build());
            FSDirEncryptionZoneOp.warmUpEdekCache(edekCacheLoader, dir, edekCacheLoaderDelay, edekCacheLoaderInterval);
        }
    } finally {
        startingActiveService = false;
        blockManager.checkSafeMode();
        writeUnlock("startActiveServices");
    }
}
Also used: Daemon(org.apache.hadoop.util.Daemon), ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder)
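The EDEK cache warm-up above boils down to a common idiom: a single-thread daemon executor whose worker gets a recognizable name. Below is a minimal, self-contained sketch of that idiom; the class name EdekCachePattern and the printing task are illustrative stand-ins for FSDirEncryptionZoneOp.warmUpEdekCache, not Hadoop code.

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class EdekCachePattern {
    public static void main(String[] args) {
        // Daemon threads won't keep the JVM alive; %d is replaced with an
        // incrementing counter by the factory that build() returns.
        ExecutorService edekCacheLoader = Executors.newSingleThreadExecutor(
                new ThreadFactoryBuilder()
                        .setDaemon(true)
                        .setNameFormat("Warm Up EDEK Cache Thread #%d")
                        .build());
        // Stand-in task; the real code submits the cache warm-up here.
        edekCacheLoader.execute(() ->
                System.out.println(Thread.currentThread().getName() + " warming up"));
        edekCacheLoader.shutdown();
    }
}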

Example 2 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.

From the class ContainerLauncherImpl, method serviceStart:

protected void serviceStart() throws Exception {
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
    // Start with a default core-pool size of 10 and change it dynamically.
    launcherPool = new HadoopThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread() {

        @Override
        public void run() {
            ContainerLauncherEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                allNodes.add(event.getContainerMgrAddress());
                int poolSize = launcherPool.getCorePoolSize();
                // See if we need to bump up the pool size, but only if we
                // haven't reached the maximum limit yet.
                if (poolSize != limitOnPoolSize) {
                    // numNodes is the number of nodes where containers will run
                    // at *this* point of time. This is *not* the cluster size and
                    // doesn't need to be.
                    int numNodes = allNodes.size();
                    int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
                    if (poolSize < idealPoolSize) {
                        // Bump up the pool size to idealPoolSize + initialPoolSize;
                        // the latter is just a buffer so we are not constantly
                        // increasing the pool size.
                        int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
                        LOG.info("Setting ContainerLauncher pool size to " + newPoolSize + " as number-of-nodes to talk to is " + numNodes);
                        launcherPool.setCorePoolSize(newPoolSize);
                    }
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(createEventProcessor(event));
            // TODO: Group launching of multiple containers to a single
            // NodeManager into a single connection
            }
        }
    };
    eventHandlingThread.setName("ContainerLauncher Event Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
Also used: ThreadFactory(java.util.concurrent.ThreadFactory), HashSet(java.util.HashSet), Set(java.util.Set), ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder), LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue), HadoopThreadPoolExecutor(org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor)
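The interesting part of this example is resizing the core pool at runtime: with an unbounded LinkedBlockingQueue, a ThreadPoolExecutor never grows past its core size, so the launcher bumps the core size itself. The sketch below shows that mechanic in isolation; it substitutes java.util.concurrent.ThreadPoolExecutor for Hadoop's HadoopThreadPoolExecutor subclass, and the node and limit numbers are made up for illustration.

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DynamicPoolPattern {
    public static void main(String[] args) {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("ContainerLauncher #%d")
                .setDaemon(true)
                .build();
        // With an unbounded queue, only the core size matters; maximumPoolSize
        // is effectively ignored, hence the runtime calls to setCorePoolSize.
        ThreadPoolExecutor launcherPool = new ThreadPoolExecutor(
                10, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
                new LinkedBlockingQueue<Runnable>(), tf);
        int numNodes = 25;          // illustrative: nodes seen so far
        int limitOnPoolSize = 500;  // illustrative: configured upper bound
        int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
        if (launcherPool.getCorePoolSize() < idealPoolSize) {
            launcherPool.setCorePoolSize(Math.min(limitOnPoolSize, idealPoolSize + 10));
        }
        launcherPool.execute(() ->
                System.out.println("running on " + Thread.currentThread().getName()));
        launcherPool.shutdown();
    }
}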

Example 3 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.

From the class CommitterEventHandler, method serviceStart:

@Override
protected void serviceStart() throws Exception {
    ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder().setNameFormat("CommitterEvent Processor #%d");
    if (jobClassLoader != null) {
        // if the job classloader is enabled, we need to use the job classloader
        // as the thread context classloader (TCCL) of these threads in case the
        // committer needs to load another class via TCCL
        ThreadFactory backingTf = new ThreadFactory() {

            @Override
            public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(jobClassLoader);
                return thread;
            }
        };
        tfBuilder.setThreadFactory(backingTf);
    }
    ThreadFactory tf = tfBuilder.build();
    launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread(new Runnable() {

        @Override
        public void run() {
            CommitterEvent event = null;
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(new EventProcessor(event));
            }
        }
    });
    eventHandlingThread.setName("CommitterEvent Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
Also used: ThreadFactory(java.util.concurrent.ThreadFactory), ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder), LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue), HadoopThreadPoolExecutor(org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor)
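What distinguishes this example is setThreadFactory: ThreadFactoryBuilder decorates a backing factory instead of creating threads itself, so the backing factory can set the context classloader while the builder still applies the name format. A self-contained sketch follows, with the current class's own classloader standing in for jobClassLoader.

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.ThreadFactory;

public class TcclFactoryPattern {
    public static void main(String[] args) {
        // Stand-in for the job classloader; any ClassLoader works for the demo.
        final ClassLoader jobClassLoader = TcclFactoryPattern.class.getClassLoader();
        // The backing factory creates the thread and sets its context
        // classloader; the builder then renames the threads it hands out.
        ThreadFactory backingTf = r -> {
            Thread thread = new Thread(r);
            thread.setContextClassLoader(jobClassLoader);
            return thread;
        };
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("CommitterEvent Processor #%d")
                .setThreadFactory(backingTf)
                .build();
        tf.newThread(() -> System.out.println(
                Thread.currentThread().getName() + " TCCL = "
                        + Thread.currentThread().getContextClassLoader())).start();
    }
}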

Example 4 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.

From the class LocalContainerLauncher, method serviceStart:

public void serviceStart() throws Exception {
    // create a single thread for serial execution of tasks
    // make it a daemon thread so that the process can exit even if the task is
    // not interruptible
    taskRunner = HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
    // create and start an event handling thread
    eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
    // if the job classloader is specified, set it as the context classloader
    // of the event handler thread as well as the subtask runner threads
    if (jobClassLoader != null) {
        LOG.info("Setting " + jobClassLoader + " as the context classloader of thread " + eventHandler.getName());
        eventHandler.setContextClassLoader(jobClassLoader);
    } else {
        // note the current TCCL
        LOG.info("Context classloader of thread " + eventHandler.getName() + ": " + eventHandler.getContextClassLoader());
    }
    eventHandler.start();
    super.serviceStart();
}
Also used: ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder)
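Note that the name format here, "uber-SubtaskRunner", contains no %d: for a single-thread executor the counter adds nothing, and String.format simply ignores the unused argument. A minimal sketch under that assumption, using Executors.newSingleThreadExecutor in place of Hadoop's HadoopExecutors wrapper:

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SingleRunnerPattern {
    public static void main(String[] args) {
        // A fixed name without %d is fine for a single worker thread.
        ExecutorService taskRunner = Executors.newSingleThreadExecutor(
                new ThreadFactoryBuilder()
                        .setDaemon(true)
                        .setNameFormat("uber-SubtaskRunner")
                        .build());
        taskRunner.execute(() ->
                System.out.println("running on " + Thread.currentThread().getName()));
        taskRunner.shutdown();
    }
}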

Example 5 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.

From the class NonAggregatingLogHandler, method createScheduledThreadPoolExecutor:

ScheduledThreadPoolExecutor createScheduledThreadPoolExecutor(Configuration conf) {
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("LogDeleter #%d").build();
    sched = new HadoopScheduledThreadPoolExecutor(conf.getInt(YarnConfiguration.NM_LOG_DELETION_THREADS_COUNT, YarnConfiguration.DEFAULT_NM_LOG_DELETE_THREAD_COUNT), tf);
    return sched;
}
Also used: ThreadFactory(java.util.concurrent.ThreadFactory), HadoopScheduledThreadPoolExecutor(org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor), ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder)
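The same builder works for scheduled pools. Here is a minimal sketch of the pattern with a plain java.util.concurrent.ScheduledThreadPoolExecutor standing in for Hadoop's HadoopScheduledThreadPoolExecutor subclass, and a hard-coded thread count of 4 in place of the YarnConfiguration lookup:

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class LogDeleterPattern {
    public static void main(String[] args) throws InterruptedException {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LogDeleter #%d")
                .build();
        // 4 stands in for the configured NM log-deletion thread count.
        ScheduledThreadPoolExecutor sched = new ScheduledThreadPoolExecutor(4, tf);
        sched.schedule(() ->
                System.out.println(Thread.currentThread().getName() + " deleting logs"),
                100, TimeUnit.MILLISECONDS);
        sched.shutdown();
        sched.awaitTermination(5, TimeUnit.SECONDS);
    }
}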

Aggregations

Types most often used together with ThreadFactoryBuilder, with occurrence counts:

ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 124
ThreadFactory (java.util.concurrent.ThreadFactory): 38
ExecutorService (java.util.concurrent.ExecutorService): 35
IOException (java.io.IOException): 19
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 18
Future (java.util.concurrent.Future): 16
ExecutionException (java.util.concurrent.ExecutionException): 14
ArrayList (java.util.ArrayList): 10
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 10
HashMap (java.util.HashMap): 9
HashSet (java.util.HashSet): 9
Callable (java.util.concurrent.Callable): 9
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 9
Path (org.apache.hadoop.fs.Path): 9
Test (org.junit.Test): 9
LinkedList (java.util.LinkedList): 8
Map (java.util.Map): 8
Before (org.junit.Before): 8
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 7
ScheduledThreadPoolExecutor (java.util.concurrent.ScheduledThreadPoolExecutor): 7