Use of org.apache.hadoop.util.Daemon in project hadoop by apache.
The class JournalNodeSyncer, method startSyncJournalsDaemon.
private void startSyncJournalsDaemon() {
  syncJournalDaemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      while (shouldSync) {
        try {
          if (!journal.isFormatted()) {
            LOG.warn("Journal not formatted. Cannot sync.");
          } else {
            syncJournals();
          }
          Thread.sleep(journalSyncInterval);
        } catch (Throwable t) {
          if (!shouldSync) {
            if (t instanceof InterruptedException) {
              LOG.info("Stopping JournalNode Sync.");
            } else {
              LOG.warn("JournalNodeSyncer received an exception while "
                  + "shutting down.", t);
            }
            break;
          } else {
            if (t instanceof InterruptedException) {
              LOG.warn("JournalNodeSyncer interrupted", t);
              break;
            }
          }
          LOG.error("JournalNodeSyncer daemon received Runtime exception. ", t);
        }
      }
    }
  });
  syncJournalDaemon.start();
}
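In all of these snippets, Daemon is a thin subclass of java.lang.Thread whose constructors call setDaemon(true), so the wrapped Runnable runs on a background thread that does not keep the JVM alive. A minimal, self-contained sketch of the same start/interrupt lifecycle (illustrative code, not taken from the Hadoop source):

import org.apache.hadoop.util.Daemon;

public class DaemonLifecycleSketch {
  public static void main(String[] args) throws InterruptedException {
    // Daemon's constructors call setDaemon(true), so this thread
    // will not block JVM exit the way a plain Thread would.
    Daemon worker = new Daemon(new Runnable() {
      @Override
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          try {
            // Periodic background work would go here.
            Thread.sleep(1000L);
          } catch (InterruptedException e) {
            // Restore the interrupt flag so the loop condition sees it.
            Thread.currentThread().interrupt();
          }
        }
      }
    });
    worker.start();
    Thread.sleep(3000L);
    worker.interrupt(); // the usual stop signal in the snippets on this page
    worker.join();
  }
}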
Use of org.apache.hadoop.util.Daemon in project hadoop by apache.
The class SecondaryNameNode, method startCheckpointThread.
public void startCheckpointThread() {
  Preconditions.checkState(checkpointThread == null,
      "Should not already have a thread");
  Preconditions.checkState(shouldRun, "shouldRun should be true");
  checkpointThread = new Daemon(this);
  checkpointThread.start();
}
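Here SecondaryNameNode itself implements Runnable, so the instance is handed straight to the Daemon constructor. The matching stop side is not shown on this page; a hedged sketch of the conventional counterpart (the method name and the shouldRun flag are assumptions, not copied from SecondaryNameNode):

// Hypothetical stop counterpart: flip the flag the run() loop checks,
// interrupt the daemon out of any sleep, and wait briefly for it to exit.
public void stopCheckpointThread() {
  if (checkpointThread == null) {
    return;
  }
  shouldRun = false;               // assumed volatile flag read by run()
  checkpointThread.interrupt();
  try {
    checkpointThread.join(10000L); // bounded wait for a clean exit
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
  }
  checkpointThread = null;
}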
Use of org.apache.hadoop.util.Daemon in project hadoop by apache.
The class DataNodeDiskMetrics, method startDiskOutlierDetectionThread.
private void startDiskOutlierDetectionThread() {
  slowDiskDetectionDaemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      while (shouldRun) {
        Map<String, Double> metadataOpStats = Maps.newHashMap();
        Map<String, Double> readIoStats = Maps.newHashMap();
        Map<String, Double> writeIoStats = Maps.newHashMap();
        FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
        try {
          fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
          Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences.iterator();
          while (volumeIterator.hasNext()) {
            FsVolumeSpi volume = volumeIterator.next();
            // Read the metrics from the volume just fetched; calling
            // volumeIterator.next() a second time here would pair stats
            // with the wrong volume and could throw NoSuchElementException.
            DataNodeVolumeMetrics metrics = volume.getMetrics();
            String volumeName = volume.getBaseURI().getPath();
            metadataOpStats.put(volumeName, metrics.getMetadataOperationMean());
            readIoStats.put(volumeName, metrics.getReadIoMean());
            writeIoStats.put(volumeName, metrics.getWriteIoMean());
          }
        } finally {
          if (fsVolumeReferences != null) {
            try {
              fsVolumeReferences.close();
            } catch (IOException e) {
              LOG.error("Error in releasing FS Volume references", e);
            }
          }
        }
        if (metadataOpStats.isEmpty() && readIoStats.isEmpty()
            && writeIoStats.isEmpty()) {
          LOG.debug("No disk stats available for detecting outliers.");
          return;
        }
        detectAndUpdateDiskOutliers(metadataOpStats, readIoStats, writeIoStats);
        try {
          Thread.sleep(detectionInterval);
        } catch (InterruptedException e) {
          LOG.error("Disk Outlier Detection thread interrupted", e);
          Thread.currentThread().interrupt();
        }
      }
    }
  });
  slowDiskDetectionDaemon.start();
}
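Since FsVolumeReferences implements Closeable (its close() is called in the finally block above), the acquire/iterate/release dance can also be written with try-with-resources. A hedged sketch, assuming the same surrounding fields (dn, LOG, and the three stats maps) and that FsVolumeReferences is iterable as its iterator() call above suggests:

// Sketch only: fields from the snippet above are assumed in scope.
try (FsDatasetSpi.FsVolumeReferences refs =
         dn.getFSDataset().getFsVolumeReferences()) {
  for (FsVolumeSpi volume : refs) {
    DataNodeVolumeMetrics metrics = volume.getMetrics();
    String volumeName = volume.getBaseURI().getPath();
    metadataOpStats.put(volumeName, metrics.getMetadataOperationMean());
    readIoStats.put(volumeName, metrics.getReadIoMean());
    writeIoStats.put(volumeName, metrics.getWriteIoMean());
  }
} catch (IOException e) {
  LOG.error("Error in releasing FS Volume references", e);
}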
Use of org.apache.hadoop.util.Daemon in project hive by apache.
The class TokenStoreDelegationTokenSecretManager, method startThreads.
@Override
public synchronized void startThreads() throws IOException {
  try {
    // updateCurrentKey needs to be called to initialize the master key
    // (there should be a null check added in the future in rollMasterKey)
    // updateCurrentKey();
    Method m = AbstractDelegationTokenSecretManager.class
        .getDeclaredMethod("updateCurrentKey");
    m.setAccessible(true);
    m.invoke(this);
  } catch (Exception e) {
    throw new IOException("Failed to initialize master key", e);
  }
  running = true;
  tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
  tokenRemoverThread.start();
}
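The reflection here works around updateCurrentKey() being private on the Hadoop superclass. The same trick in isolation, as a self-contained sketch (the class and method names below are illustrative, not from the Hive source):

import java.io.IOException;
import java.lang.reflect.Method;

final class PrivateMethodInvoker {
  // Invoke a no-arg private method declared on a specific class in the
  // target's hierarchy. getDeclaredMethod (unlike getMethod) sees
  // private members, but only on the class it is called on.
  static void invokePrivate(Object target, Class<?> declaringClass,
      String methodName) throws IOException {
    try {
      Method m = declaringClass.getDeclaredMethod(methodName);
      m.setAccessible(true); // lift the private access check
      m.invoke(target);
    } catch (ReflectiveOperationException e) {
      throw new IOException("Failed to invoke " + methodName, e);
    }
  }
}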
Use of org.apache.hadoop.util.Daemon in project hive by apache.
The class JvmPauseMonitor, method start.
public void start() {
  Preconditions.checkState(monitorThread == null,
      "JvmPauseMonitor thread is already started");
  monitorThread = new Daemon(new Monitor());
  monitorThread.start();
}
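The Monitor runnable (not shown on this page) detects JVM pauses such as long GC stops by sleeping a fixed interval and measuring how far the wakeup overshoots. A hedged, self-contained sketch of that idea (simplified; the interval, threshold, and flag are assumptions, not the actual Hive code):

// Simplified pause-detection loop in the spirit of Monitor: sleep a
// fixed interval and treat any large overshoot as a JVM or host pause.
public class PauseDetectorSketch implements Runnable {
  private static final long SLEEP_MS = 500L;            // assumed polling interval
  private static final long WARN_THRESHOLD_MS = 1000L;  // assumed warn threshold
  private volatile boolean shouldRun = true;

  @Override
  public void run() {
    while (shouldRun) {
      long start = System.nanoTime();
      try {
        Thread.sleep(SLEEP_MS);
      } catch (InterruptedException e) {
        return;
      }
      long extraMs = (System.nanoTime() - start) / 1_000_000L - SLEEP_MS;
      if (extraMs > WARN_THRESHOLD_MS) {
        System.err.println("Detected pause of approximately " + extraMs
            + "ms (GC or host stall?)");
      }
    }
  }

  public void stop() {
    shouldRun = false;
  }
}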