Search in sources:

Example 1 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class AbstractDelegationTokenSecretManager, method startThreads.

/** Should be called before this object is used. */
public void startThreads() throws IOException {
    Preconditions.checkState(!running);
    updateCurrentKey();
    synchronized (this) {
        running = true;
        tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
        tokenRemoverThread.start();
    }
}
Also used: Daemon (org.apache.hadoop.util.Daemon)
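
For context: org.apache.hadoop.util.Daemon is a thin subclass of java.lang.Thread whose constructor calls setDaemon(true), so a wrapped Runnable never keeps the JVM alive on its own. Below is a minimal, self-contained sketch of the start/stop pattern this example relies on; the class name, field names, and loop body are illustrative stand-ins, not code from AbstractDelegationTokenSecretManager.

import org.apache.hadoop.util.Daemon;

public class TokenRemoverSketch {

    private volatile boolean running;
    private Daemon removerThread;

    /** Start the background remover exactly once, under the object lock. */
    public synchronized void startThreads() {
        if (running) {
            throw new IllegalStateException("already running");
        }
        running = true;
        removerThread = new Daemon(() -> {
            while (running) {
                // Real code would expire tokens here; because Daemon marks the
                // thread as a JVM daemon, it cannot block process shutdown.
                try {
                    Thread.sleep(1000L);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        });
        removerThread.start();
    }

    /** Counterpart to startThreads(): stop the loop and interrupt the sleeper. */
    public synchronized void stopThreads() {
        running = false;
        if (removerThread != null) {
            removerThread.interrupt();
        }
    }
}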

Example 2 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class FSNamesystem, method startActiveServices.

/**
   * Start services required in active state
   * @throws IOException
   */
void startActiveServices() throws IOException {
    startingActiveService = true;
    LOG.info("Starting services required for active state");
    writeLock();
    try {
        FSEditLog editLog = getFSImage().getEditLog();
        if (!editLog.isOpenForWrite()) {
            // During startup, we're already open for write during initialization.
            editLog.initJournalsForWrite();
            // May need to recover
            editLog.recoverUnclosedStreams();
            LOG.info("Catching up to latest edits from old active before " + "taking over writer role in edits logs");
            editLogTailer.catchupDuringFailover();
            blockManager.setPostponeBlocksFromFuture(false);
            blockManager.getDatanodeManager().markAllDatanodesStale();
            blockManager.clearQueues();
            blockManager.processAllPendingDNMessages();
            // Only need to re-process the queues if not in safe mode.
            if (!isInSafeMode()) {
                LOG.info("Reprocessing replication and invalidation queues");
                blockManager.initializeReplQueues();
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("NameNode metadata after re-processing " + "replication and invalidation queues during failover:\n" + metaSaveAsString());
            }
            long nextTxId = getFSImage().getLastAppliedTxId() + 1;
            LOG.info("Will take over writing edit logs at txnid " + nextTxId);
            editLog.setNextTxId(nextTxId);
            getFSImage().editLog.openForWrite(getEffectiveLayoutVersion());
        }
        // Initialize the quota.
        dir.updateCountForQuota();
        // Enable quota checks.
        dir.enableQuotaChecks();
        if (haEnabled) {
            // Renew all of the leases before becoming active.
            // This is because, while we were in standby mode,
            // the leases weren't getting renewed on this NN.
            // Give them all a fresh start here.
            leaseManager.renewAllLeases();
        }
        leaseManager.startMonitor();
        startSecretManagerIfNecessary();
        // The ResourceMonitor is required only on the active NN. See HDFS-2914.
        this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
        nnrmthread.start();
        nnEditLogRoller = new Daemon(new NameNodeEditLogRoller(editLogRollerThreshold, editLogRollerInterval));
        nnEditLogRoller.start();
        if (lazyPersistFileScrubIntervalSec > 0) {
            lazyPersistFileScrubber = new Daemon(new LazyPersistFileScrubber(lazyPersistFileScrubIntervalSec));
            lazyPersistFileScrubber.start();
        } else {
            LOG.warn("Lazy persist file scrubber is disabled," + " configured scrub interval is zero.");
        }
        cacheManager.startMonitorThread();
        blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
        if (provider != null) {
            edekCacheLoader = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Warm Up EDEK Cache Thread #%d").build());
            FSDirEncryptionZoneOp.warmUpEdekCache(edekCacheLoader, dir, edekCacheLoaderDelay, edekCacheLoaderInterval);
        }
    } finally {
        startingActiveService = false;
        blockManager.checkSafeMode();
        writeUnlock("startActiveServices");
    }
}
Also used: Daemon (org.apache.hadoop.util.Daemon), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder)
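
Note the second daemon idiom at the end of this method: rather than wrapping a Runnable in Daemon, the EDEK cache warm-up builds a single-thread executor whose worker threads are daemons, via Guava's ThreadFactoryBuilder. A hedged sketch of that setup in isolation; the submitted task is a placeholder, not the real FSDirEncryptionZoneOp.warmUpEdekCache logic.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class DaemonExecutorSketch {

    public static void main(String[] args) throws InterruptedException {
        // Daemon worker threads with a readable name pattern, as in startActiveServices().
        ExecutorService edekCacheLoader = Executors.newSingleThreadExecutor(
                new ThreadFactoryBuilder()
                        .setDaemon(true) // same effect as org.apache.hadoop.util.Daemon
                        .setNameFormat("Warm Up EDEK Cache Thread #%d") // %d is a per-factory counter
                        .build());

        edekCacheLoader.submit(() ->
                // Placeholder for the real cache warm-up work.
                System.out.println("warming cache on " + Thread.currentThread().getName()));

        edekCacheLoader.shutdown();
        // Wait briefly: daemon threads die with the JVM, so without this the
        // task might never run in a short-lived program.
        edekCacheLoader.awaitTermination(5, TimeUnit.SECONDS);
    }
}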

Example 3 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class DataNodeDiskMetrics, method startDiskOutlierDetectionThread.

private void startDiskOutlierDetectionThread() {
    slowDiskDetectionDaemon = new Daemon(new Runnable() {

        @Override
        public void run() {
            while (shouldRun) {
                Map<String, Double> metadataOpStats = Maps.newHashMap();
                Map<String, Double> readIoStats = Maps.newHashMap();
                Map<String, Double> writeIoStats = Maps.newHashMap();
                FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
                try {
                    fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
                    Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences.iterator();
                    while (volumeIterator.hasNext()) {
                        FsVolumeSpi volume = volumeIterator.next();
                        DataNodeVolumeMetrics metrics = volume.getMetrics();
                        String volumeName = volume.getBaseURI().getPath();
                        metadataOpStats.put(volumeName, metrics.getMetadataOperationMean());
                        readIoStats.put(volumeName, metrics.getReadIoMean());
                        writeIoStats.put(volumeName, metrics.getWriteIoMean());
                    }
                } finally {
                    if (fsVolumeReferences != null) {
                        try {
                            fsVolumeReferences.close();
                        } catch (IOException e) {
                            LOG.error("Error in releasing FS Volume references", e);
                        }
                    }
                }
                if (metadataOpStats.isEmpty() && readIoStats.isEmpty() && writeIoStats.isEmpty()) {
                    LOG.debug("No disk stats available for detecting outliers.");
                    return;
                }
                detectAndUpdateDiskOutliers(metadataOpStats, readIoStats, writeIoStats);
                try {
                    Thread.sleep(detectionInterval);
                } catch (InterruptedException e) {
                    LOG.error("Disk Outlier Detection thread interrupted", e);
                    Thread.currentThread().interrupt();
                }
            }
        }
    });
    slowDiskDetectionDaemon.start();
}
Also used: Daemon (org.apache.hadoop.util.Daemon), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), DataNodeVolumeMetrics (org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics), IOException (java.io.IOException)
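
The detection loop exits when shouldRun flips to false or the sleep is interrupted, but the snippet does not show who flips it. A hedged sketch of a matching lifecycle, with illustrative method names and a placeholder interval; only the Daemon start/interrupt/join choreography is the point.

import org.apache.hadoop.util.Daemon;

public class DetectionLifecycleSketch {

    private volatile boolean shouldRun = true;
    private Daemon slowDiskDetectionDaemon;

    void start() {
        slowDiskDetectionDaemon = new Daemon(() -> {
            while (shouldRun) {
                // Real code collects per-volume stats and detects outliers here.
                try {
                    Thread.sleep(10_000L); // stand-in for detectionInterval
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        });
        slowDiskDetectionDaemon.start();
    }

    /**
     * Flip the flag before interrupting, so both the loop condition and the
     * InterruptedException handler observe the shutdown request.
     */
    void shutdownAndWait() throws InterruptedException {
        shouldRun = false;
        slowDiskDetectionDaemon.interrupt();
        slowDiskDetectionDaemon.join();
    }
}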

Example 4 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class OpenFileCtx, method waitForDump.

// Check whether the new writes need to be dumped
private void waitForDump() {
    if (!enabledDump) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Do nothing, dump is disabled.");
        }
        return;
    }
    if (nonSequentialWriteInMemory.get() < DUMP_WRITE_WATER_MARK) {
        return;
    }
    // wake up the dumper thread to dump the data
    synchronized (this) {
        if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Asking dumper to dump...");
            }
            if (dumpThread == null) {
                dumpThread = new Daemon(new Dumper());
                dumpThread.start();
            } else {
                this.notifyAll();
            }
        }
        while (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
            try {
                this.wait();
            } catch (InterruptedException ignored) {
            }
        }
    }
}
Also used: Daemon (org.apache.hadoop.util.Daemon)
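
waitForDump() pairs a lazily started dumper Daemon with wait()/notifyAll() on the OpenFileCtx monitor: producers block once in-memory writes cross the water mark, and the dumper wakes them after draining. A simplified sketch of both halves of that handshake; the water mark value, the drain step, and every name other than Daemon are placeholders, not OpenFileCtx code.

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.util.Daemon;

public class WaterMarkSketch {

    private static final long WATER_MARK = 1024 * 1024; // stand-in for DUMP_WRITE_WATER_MARK
    private final AtomicLong inMemory = new AtomicLong();
    private Daemon dumpThread;

    /** Producer entry point: account for a write, then block if over the mark. */
    void recordWrite(long bytes) {
        inMemory.addAndGet(bytes);
        waitForDump();
    }

    private synchronized void waitForDump() {
        if (inMemory.get() < WATER_MARK) {
            return;
        }
        if (dumpThread == null) {
            dumpThread = new Daemon(this::dumpLoop); // lazily start the dumper
            dumpThread.start();
        } else {
            notifyAll(); // wake an idle dumper
        }
        while (inMemory.get() >= WATER_MARK) {
            try {
                wait(); // block until the dumper catches up
            } catch (InterruptedException ignored) {
            }
        }
    }

    /** Consumer side: wait for work, drain memory, then release blocked producers. */
    private void dumpLoop() {
        while (true) {
            synchronized (this) {
                while (inMemory.get() < WATER_MARK) {
                    try {
                        wait();
                    } catch (InterruptedException e) {
                        return; // daemon thread, so exiting is safe
                    }
                }
            }
            inMemory.set(0); // placeholder for spilling writes to disk
            synchronized (this) {
                notifyAll(); // unblock producers stuck in waitForDump()
            }
        }
    }
}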

Example 5 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class JournalNodeSyncer, method startSyncJournalsDaemon.

private void startSyncJournalsDaemon() {
    syncJournalDaemon = new Daemon(new Runnable() {

        @Override
        public void run() {
            while (shouldSync) {
                try {
                    if (!journal.isFormatted()) {
                        LOG.warn("Journal not formatted. Cannot sync.");
                    } else {
                        syncJournals();
                    }
                    Thread.sleep(journalSyncInterval);
                } catch (Throwable t) {
                    if (!shouldSync) {
                        if (t instanceof InterruptedException) {
                            LOG.info("Stopping JournalNode Sync.");
                        } else {
                            LOG.warn("JournalNodeSyncer received an exception while " + "shutting down.", t);
                        }
                        break;
                    } else {
                        if (t instanceof InterruptedException) {
                            LOG.warn("JournalNodeSyncer interrupted", t);
                            break;
                        }
                    }
                    LOG.error("JournalNodeSyncer daemon received Runtime exception. ", t);
                }
            }
        }
    });
    syncJournalDaemon.start();
}
Also used: Daemon (org.apache.hadoop.util.Daemon)
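
The distinctive choice here is catch (Throwable t) rather than catch (IOException e): one failed sync cycle must not kill the long-lived daemon, and only interruption or the shouldSync flag ends the loop. A condensed sketch of that resilience pattern; doOneSync() and the interval are placeholders for syncJournals() and journalSyncInterval.

import org.apache.hadoop.util.Daemon;

public class ResilientSyncSketch {

    private volatile boolean shouldSync = true;

    Daemon startSyncDaemon() {
        Daemon daemon = new Daemon(() -> {
            while (shouldSync) {
                try {
                    doOneSync(); // may throw anything, checked or not
                    Thread.sleep(2_000L); // stand-in for journalSyncInterval
                } catch (Throwable t) {
                    if (t instanceof InterruptedException) {
                        // Interruption signals shutdown: restore status and stop.
                        Thread.currentThread().interrupt();
                        break;
                    }
                    // Swallow and log so one bad cycle does not kill the daemon.
                    System.err.println("sync cycle failed: " + t);
                }
            }
        });
        daemon.start();
        return daemon;
    }

    private void doOneSync() {
        // Placeholder for syncJournals().
    }
}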

Aggregations

Daemon (org.apache.hadoop.util.Daemon): 25 usages
IOException (java.io.IOException): 11 usages
File (java.io.File): 3 usages
Method (java.lang.reflect.Method): 2 usages
Test (org.junit.Test): 2 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1 usage
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 1 usage
EventLoop (io.netty.channel.EventLoop): 1 usage
EOFException (java.io.EOFException): 1 usage
FileNotFoundException (java.io.FileNotFoundException): 1 usage
OutputStreamWriter (java.io.OutputStreamWriter): 1 usage
RandomAccessFile (java.io.RandomAccessFile): 1 usage
Writer (java.io.Writer): 1 usage
Field (java.lang.reflect.Field): 1 usage
SocketTimeoutException (java.net.SocketTimeoutException): 1 usage
AsynchronousCloseException (java.nio.channels.AsynchronousCloseException): 1 usage
ArrayList (java.util.ArrayList): 1 usage
ExecutionException (java.util.concurrent.ExecutionException): 1 usage
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage