Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class AbstractDelegationTokenSecretManager, method startThreads:

/** Should be called before this object is used. */
public void startThreads() throws IOException {
  Preconditions.checkState(!running);
  updateCurrentKey();
  synchronized (this) {
    running = true;
    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
    tokenRemoverThread.start();
  }
}
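Daemon is a thin subclass of java.lang.Thread that calls setDaemon(true) in its constructor, so worker threads like the token remover never keep the JVM alive on their own. A minimal, self-contained sketch of the start/interrupt/join lifecycle this pattern implies is below; the class and the stopThreads body are illustrative assumptions, not Hadoop's implementation.

import org.apache.hadoop.util.Daemon;

/** Hypothetical sketch of the Daemon start/stop pairing (not Hadoop code). */
public class TokenRemoverSketch {
  private volatile boolean running;
  private Daemon removerThread;

  public synchronized void startThreads() {
    running = true;
    removerThread = new Daemon(new Runnable() {
      @Override
      public void run() {
        while (running) {
          try {
            // The real ExpiredTokenRemover would roll master keys and
            // purge expired tokens here; we just idle.
            Thread.sleep(1000L);
          } catch (InterruptedException e) {
            // Interrupted by stopThreads(); the loop re-checks the flag.
          }
        }
      }
    });
    removerThread.start();
  }

  public synchronized void stopThreads() throws InterruptedException {
    running = false;
    if (removerThread != null) {
      removerThread.interrupt(); // wake the worker out of sleep()
      removerThread.join();      // wait for a clean exit
    }
  }
}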
Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class FSNamesystem, method startActiveServices:

/**
 * Start services required in active state
 * @throws IOException
 */
void startActiveServices() throws IOException {
  startingActiveService = true;
  LOG.info("Starting services required for active state");
  writeLock();
  try {
    FSEditLog editLog = getFSImage().getEditLog();
    if (!editLog.isOpenForWrite()) {
      // During startup, we're already open for write during initialization.
      editLog.initJournalsForWrite();
      // May need to recover
      editLog.recoverUnclosedStreams();
      LOG.info("Catching up to latest edits from old active before "
          + "taking over writer role in edits logs");
      editLogTailer.catchupDuringFailover();
      blockManager.setPostponeBlocksFromFuture(false);
      blockManager.getDatanodeManager().markAllDatanodesStale();
      blockManager.clearQueues();
      blockManager.processAllPendingDNMessages();
      // Only need to re-process the queues if not in SafeMode.
      if (!isInSafeMode()) {
        LOG.info("Reprocessing replication and invalidation queues");
        blockManager.initializeReplQueues();
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("NameNode metadata after re-processing "
            + "replication and invalidation queues during failover:\n"
            + metaSaveAsString());
      }
      long nextTxId = getFSImage().getLastAppliedTxId() + 1;
      LOG.info("Will take over writing edit logs at txnid " + nextTxId);
      editLog.setNextTxId(nextTxId);
      getFSImage().editLog.openForWrite(getEffectiveLayoutVersion());
    }
    // Initialize the quota.
    dir.updateCountForQuota();
    // Enable quota checks.
    dir.enableQuotaChecks();
    if (haEnabled) {
      // Renew all of the leases before becoming active.
      // This is because, while we were in standby mode,
      // the leases weren't getting renewed on this NN.
      // Give them all a fresh start here.
      leaseManager.renewAllLeases();
    }
    leaseManager.startMonitor();
    startSecretManagerIfNecessary();
    // ResourceMonitor is required only on the active NN. See HDFS-2914.
    this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
    nnrmthread.start();
    nnEditLogRoller = new Daemon(new NameNodeEditLogRoller(
        editLogRollerThreshold, editLogRollerInterval));
    nnEditLogRoller.start();
    if (lazyPersistFileScrubIntervalSec > 0) {
      lazyPersistFileScrubber = new Daemon(new LazyPersistFileScrubber(
          lazyPersistFileScrubIntervalSec));
      lazyPersistFileScrubber.start();
    } else {
      LOG.warn("Lazy persist file scrubber is disabled,"
          + " configured scrub interval is zero.");
    }
    cacheManager.startMonitorThread();
    blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
    if (provider != null) {
      edekCacheLoader = Executors.newSingleThreadExecutor(
          new ThreadFactoryBuilder().setDaemon(true)
              .setNameFormat("Warm Up EDEK Cache Thread #%d").build());
      FSDirEncryptionZoneOp.warmUpEdekCache(edekCacheLoader, dir,
          edekCacheLoaderDelay, edekCacheLoaderInterval);
    }
  } finally {
    startingActiveService = false;
    blockManager.checkSafeMode();
    writeUnlock("startActiveServices");
  }
}
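Each daemon started here has the same shape: a Runnable that polls some condition at a fixed interval until the namesystem leaves the active state. Below is a minimal, runnable sketch of that loop; the class, its fields, and the main() driver are hypothetical stand-ins for runnables such as NameNodeResourceMonitor, not FSNamesystem code.

import org.apache.hadoop.util.Daemon;

/** Hypothetical periodic-check runnable in the style used above. */
public class ResourceMonitorSketch implements Runnable {
  private final long checkIntervalMs;
  private volatile boolean fsRunning = true;

  public ResourceMonitorSketch(long checkIntervalMs) {
    this.checkIntervalMs = checkIntervalMs;
  }

  public void stop() {
    fsRunning = false;
  }

  @Override
  public void run() {
    while (fsRunning) {
      // The real monitor would call checkAvailableResources() here and
      // push the NN into safe mode when disk space runs low.
      try {
        Thread.sleep(checkIntervalMs);
      } catch (InterruptedException e) {
        // Interrupted during shutdown; exit the loop.
        return;
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ResourceMonitorSketch monitor = new ResourceMonitorSketch(1000L);
    Daemon d = new Daemon(monitor); // daemon thread: never blocks JVM exit
    d.start();
    Thread.sleep(50L);
    monitor.stop();
    d.interrupt();
    d.join();
  }
}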
Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class DataNodeDiskMetrics, method startDiskOutlierDetectionThread:

private void startDiskOutlierDetectionThread() {
  slowDiskDetectionDaemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      while (shouldRun) {
        Map<String, Double> metadataOpStats = Maps.newHashMap();
        Map<String, Double> readIoStats = Maps.newHashMap();
        Map<String, Double> writeIoStats = Maps.newHashMap();
        FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
        try {
          fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
          Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences.iterator();
          while (volumeIterator.hasNext()) {
            FsVolumeSpi volume = volumeIterator.next();
            // Use the volume already fetched; calling volumeIterator.next()
            // a second time here would skip every other volume and can
            // throw NoSuchElementException.
            DataNodeVolumeMetrics metrics = volume.getMetrics();
            String volumeName = volume.getBaseURI().getPath();
            metadataOpStats.put(volumeName, metrics.getMetadataOperationMean());
            readIoStats.put(volumeName, metrics.getReadIoMean());
            writeIoStats.put(volumeName, metrics.getWriteIoMean());
          }
        } finally {
          if (fsVolumeReferences != null) {
            try {
              fsVolumeReferences.close();
            } catch (IOException e) {
              LOG.error("Error in releasing FS Volume references", e);
            }
          }
        }
        if (metadataOpStats.isEmpty() && readIoStats.isEmpty()
            && writeIoStats.isEmpty()) {
          LOG.debug("No disk stats available for detecting outliers.");
          return;
        }
        detectAndUpdateDiskOutliers(metadataOpStats, readIoStats, writeIoStats);
        try {
          Thread.sleep(detectionInterval);
        } catch (InterruptedException e) {
          LOG.error("Disk Outlier Detection thread interrupted", e);
          Thread.currentThread().interrupt();
        }
      }
    }
  });
  slowDiskDetectionDaemon.start();
}
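The shutdown path for this daemon is the usual flag-plus-interrupt handshake: clear shouldRun, interrupt the sleep, then join. A hedged sketch follows; the field and method names mirror the snippet above but are assumptions, not DataNodeDiskMetrics' actual code.

import org.apache.hadoop.util.Daemon;

/** Hypothetical sketch of stopping the detection daemon (assumed names). */
class DiskMetricsStopSketch {
  private volatile boolean shouldRun = true;
  private Daemon slowDiskDetectionDaemon;

  void shutdownAndWait() throws InterruptedException {
    shouldRun = false;                   // loop exits at its next check
    slowDiskDetectionDaemon.interrupt(); // wake it if it is sleeping
    slowDiskDetectionDaemon.join();      // wait for the final iteration
  }
}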
Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class OpenFileCtx, method waitForDump:

// Check whether the new writes need to be dumped to disk.
private void waitForDump() {
  if (!enabledDump) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Do nothing, dump is disabled.");
    }
    return;
  }
  if (nonSequentialWriteInMemory.get() < DUMP_WRITE_WATER_MARK) {
    return;
  }
  // Wake up the dumper thread to dump the data.
  synchronized (this) {
    if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Asking dumper to dump...");
      }
      if (dumpThread == null) {
        dumpThread = new Daemon(new Dumper());
        dumpThread.start();
      } else {
        this.notifyAll();
      }
    }
    // Block the writer until the dumper has brought the in-memory
    // footprint back under the water mark.
    while (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
      try {
        this.wait();
      } catch (InterruptedException ignored) {
      }
    }
  }
}
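The other half of this handshake is the Dumper itself: it sleeps on the same monitor until a writer crosses the water mark, spills a batch to disk, and notifies the writers parked in waitForDump(). The sketch below is a hedged reconstruction of that shape; the class, dumpOneBatch(), and the byte accounting are assumptions, not OpenFileCtx's Dumper.

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.util.Daemon;

/** Hypothetical sketch of the dumper side of the water-mark handshake. */
public class DumpHandshakeSketch {
  private static final long DUMP_WRITE_WATER_MARK = 1024L * 1024;
  private final AtomicLong nonSequentialWriteInMemory = new AtomicLong();

  private class Dumper implements Runnable {
    @Override
    public void run() {
      while (!Thread.currentThread().isInterrupted()) {
        synchronized (DumpHandshakeSketch.this) {
          // Sleep until a writer crosses the water mark and notifies us.
          while (nonSequentialWriteInMemory.get() < DUMP_WRITE_WATER_MARK) {
            try {
              DumpHandshakeSketch.this.wait();
            } catch (InterruptedException e) {
              return; // shut down
            }
          }
        }
        // Spill outside the lock so writers are not blocked during the
        // (slow) disk write; dumpOneBatch() is a placeholder.
        long dumped = dumpOneBatch();
        nonSequentialWriteInMemory.addAndGet(-dumped);
        synchronized (DumpHandshakeSketch.this) {
          // Wake writers parked in waitForDump().
          DumpHandshakeSketch.this.notifyAll();
        }
      }
    }
  }

  public void startDumper() {
    new Daemon(new Dumper()).start();
  }

  private long dumpOneBatch() {
    // A real dumper writes queued non-sequential write data to a dump
    // file and returns the number of bytes moved out of memory.
    return DUMP_WRITE_WATER_MARK;
  }
}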
Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class JournalNodeSyncer, method startSyncJournalsDaemon:

private void startSyncJournalsDaemon() {
  syncJournalDaemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      while (shouldSync) {
        try {
          if (!journal.isFormatted()) {
            LOG.warn("Journal not formatted. Cannot sync.");
          } else {
            syncJournals();
          }
          Thread.sleep(journalSyncInterval);
        } catch (Throwable t) {
          if (!shouldSync) {
            if (t instanceof InterruptedException) {
              LOG.info("Stopping JournalNode Sync.");
            } else {
              LOG.warn("JournalNodeSyncer received an exception while "
                  + "shutting down.", t);
            }
            break;
          } else {
            if (t instanceof InterruptedException) {
              LOG.warn("JournalNodeSyncer interrupted", t);
              break;
            }
          }
          LOG.error("JournalNodeSyncer daemon received Runtime exception. ", t);
        }
      }
    }
  });
  syncJournalDaemon.start();
}
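Note how the catch block distinguishes a shutdown-time interrupt from a spurious one by consulting shouldSync. The stop side therefore has to flip the flag before interrupting, as in this hedged sketch (assumed names, not JournalNodeSyncer's code):

import org.apache.hadoop.util.Daemon;

/** Hypothetical sketch of the shutdown side of the sync loop above. */
class SyncStopSketch {
  private volatile boolean shouldSync = true;
  private Daemon syncJournalDaemon;

  void stopSync() {
    // Flip the flag first so the daemon's catch block sees !shouldSync
    // and takes the clean-shutdown branch instead of logging an error.
    shouldSync = false;
    if (syncJournalDaemon != null) {
      syncJournalDaemon.interrupt();
    }
  }
}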