Use of org.apache.hadoop.util.Daemon in the Apache Hadoop project.
Class DataNode, method transferBlock.
@VisibleForTesting
void transferBlock(ExtendedBlock block, DatanodeInfo[] xferTargets,
    StorageType[] xferTargetStorageTypes) throws IOException {
  BPOfferService bpos = getBPOSForBlock(block);
  DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());

  boolean replicaNotExist = false;
  boolean replicaStateNotFinalized = false;
  boolean blockFileNotExist = false;
  boolean lengthTooShort = false;

  try {
    data.checkBlock(block, block.getNumBytes(), ReplicaState.FINALIZED);
  } catch (ReplicaNotFoundException e) {
    replicaNotExist = true;
  } catch (UnexpectedReplicaStateException e) {
    replicaStateNotFinalized = true;
  } catch (FileNotFoundException e) {
    blockFileNotExist = true;
  } catch (EOFException e) {
    lengthTooShort = true;
  } catch (IOException e) {
    // The IOException indicates not being able to access block file,
    // treat it the same here as blockFileNotExist, to trigger
    // reporting it as a bad block
    blockFileNotExist = true;
  }

  if (replicaNotExist || replicaStateNotFinalized) {
    String errStr = "Can't send invalid block " + block;
    LOG.info(errStr);
    bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errStr);
    return;
  }
  if (blockFileNotExist) {
    // Report back to NN bad block caused by non-existent block file.
    reportBadBlock(bpos, block, "Can't replicate block " + block
        + " because the block file doesn't exist, or is not accessible");
    return;
  }
  if (lengthTooShort) {
    // Check if NN recorded length matches on-disk length
    // Shorter on-disk len indicates corruption so report NN the corrupt block
    reportBadBlock(bpos, block, "Can't replicate block " + block
        + " because on-disk length " + data.getLength(block)
        + " is shorter than NameNode recorded length " + block.getNumBytes());
    return;
  }

  int numTargets = xferTargets.length;
  if (numTargets > 0) {
    StringBuilder xfersBuilder = new StringBuilder();
    for (int i = 0; i < numTargets; i++) {
      xfersBuilder.append(xferTargets[i]);
      xfersBuilder.append(" ");
    }
    LOG.info(bpReg + " Starting thread to transfer " + block + " to "
        + xfersBuilder);
    new Daemon(new DataTransfer(xferTargets, xferTargetStorageTypes, block,
        BlockConstructionStage.PIPELINE_SETUP_CREATE, "")).start();
  }
}
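As a point of reference, here is a minimal, hedged sketch of the fire-and-forget pattern above. Daemon extends java.lang.Thread and marks itself a daemon thread in its constructor, so a transfer thread started this way never blocks JVM shutdown. TransferTask and the block id are hypothetical stand-ins for DataNode's DataTransfer; the sketch assumes only that hadoop-common is on the classpath.

import org.apache.hadoop.util.Daemon;

public class DaemonTransferExample {

  // Hypothetical stand-in for DataNode's DataTransfer runnable.
  static final class TransferTask implements Runnable {
    private final String blockId;

    TransferTask(String blockId) {
      this.blockId = blockId;
    }

    @Override
    public void run() {
      // Real code would stream the block's bytes to each target here.
      System.out.println("transferring " + blockId);
    }

    @Override
    public String toString() {
      // Daemon(Runnable) uses the runnable's toString() as the thread name.
      return "TransferTask for " + blockId;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Daemon d = new Daemon(new TransferTask("blk_1073741825"));
    d.start();
    // Join only so this demo doesn't exit before the daemon thread runs;
    // transferBlock() above deliberately does not wait.
    d.join();
  }
}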
Use of org.apache.hadoop.util.Daemon in the Apache Hadoop project.
Class BlockManager, method processMisReplicatedBlocks.
/**
 * For each block in the name-node, verify whether it belongs to any file,
 * or has extra or low redundancy. Place it into the respective queue.
 */
public void processMisReplicatedBlocks() {
  assert namesystem.hasWriteLock();
  stopReconstructionInitializer();
  neededReconstruction.clear();
  reconstructionQueuesInitializer = new Daemon() {

    @Override
    public void run() {
      try {
        processMisReplicatesAsync();
      } catch (InterruptedException ie) {
        LOG.info("Interrupted while processing reconstruction queues.");
      } catch (Exception e) {
        LOG.error("Error while processing reconstruction queues async", e);
      }
    }
  };
  reconstructionQueuesInitializer.setName("Reconstruction Queue Initializer");
  reconstructionQueuesInitializer.start();
}
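Note that this snippet subclasses Daemon directly instead of wrapping a Runnable, then names the thread explicitly before starting it. Below is a hedged sketch of that pattern, including the interrupt-and-join shutdown that a stop method like stopReconstructionInitializer() plausibly performs; the class and method names here are illustrative, not Hadoop's.

import org.apache.hadoop.util.Daemon;

public class DaemonSubclassExample {

  private Daemon worker;

  public void startWorker() {
    worker = new Daemon() {
      @Override
      public void run() {
        try {
          Thread.sleep(1000); // stand-in for processMisReplicatesAsync()
        } catch (InterruptedException ie) {
          System.out.println("Interrupted while processing.");
        }
      }
    };
    worker.setName("Reconstruction Queue Initializer");
    worker.start();
  }

  public void stopWorker() {
    if (worker != null) {
      worker.interrupt(); // wake the thread out of its sleep/wait
      try {
        worker.join(3000); // bounded wait for the thread to exit
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      worker = null;
    }
  }

  public static void main(String[] args) {
    DaemonSubclassExample ex = new DaemonSubclassExample();
    ex.startWorker();
    ex.stopWorker(); // interrupts the sleeping worker and waits for it
  }
}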
Use of org.apache.hadoop.util.Daemon in the Apache Hadoop project.
Class BlockPoolSliceStorage, method clearTrash.
/**
 * Delete all files and directories in the trash directories.
 */
public void clearTrash() {
  final List<File> trashRoots = new ArrayList<>();
  for (StorageDirectory sd : getStorageDirs()) {
    File trashRoot = getTrashRootDir(sd);
    if (trashRoot.exists() && sd.getPreviousDir().exists()) {
      LOG.error("Trash and PreviousDir shouldn't both exist for storage "
          + "directory " + sd);
      assert false;
    } else {
      trashRoots.add(trashRoot);
    }
  }
  stopTrashCleaner();
  trashCleaner = new Daemon(new Runnable() {

    @Override
    public void run() {
      for (File trashRoot : trashRoots) {
        FileUtil.fullyDelete(trashRoot);
        LOG.info("Cleared trash for storage directory " + trashRoot);
      }
    }

    @Override
    public String toString() {
      return "clearTrash() for " + blockpoolID;
    }
  });
  trashCleaner.start();
}
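One detail worth calling out: the anonymous Runnable overrides toString() because the Daemon(Runnable) constructor uses the runnable's toString() as the thread name, which makes the cleaner identifiable in thread dumps. A minimal sketch (the block pool id string is made up):

import org.apache.hadoop.util.Daemon;

public class DaemonNamingExample {

  public static void main(String[] args) throws InterruptedException {
    Runnable cleaner = new Runnable() {
      @Override
      public void run() {
        // Prints the name derived from toString() below.
        System.out.println("running as: " + Thread.currentThread().getName());
      }

      @Override
      public String toString() {
        return "clearTrash() for BP-1234567890-example"; // becomes the thread name
      }
    };
    Daemon d = new Daemon(cleaner);
    d.start();
    d.join();
  }
}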
Use of org.apache.hadoop.util.Daemon in the Apache Hadoop project.
Class BlockRecoveryWorker, method recoverBlocks.
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(datanode.threadGroup, new Runnable() {

    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          if (b.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
          } else {
            new RecoveryTaskContiguous(b).recover();
          }
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
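A hedged sketch of the same shape: the Daemon is created inside an existing ThreadGroup so it is grouped with the service's other threads, started, and returned so that callers (tests in particular) can join() on completion. Everything here apart from the Daemon API is a hypothetical stand-in.

import java.util.Arrays;
import org.apache.hadoop.util.Daemon;

public class DaemonThreadGroupExample {

  private final ThreadGroup threadGroup = new ThreadGroup("exampleWorkers");

  public Daemon recoverAll(final String who, final Iterable<String> blocks) {
    Daemon d = new Daemon(threadGroup, new Runnable() {
      @Override
      public void run() {
        for (String b : blocks) {
          try {
            // Stand-in for the per-block recovery work.
            System.out.println(who + " recovering " + b);
          } catch (RuntimeException e) {
            // One failed recovery should not abort the remaining blocks.
            System.err.println("recovery FAILED: " + b + " " + e);
          }
        }
      }
    });
    d.start();
    return d; // callers may join() to wait for all recoveries
  }

  public static void main(String[] args) throws InterruptedException {
    Daemon d = new DaemonThreadGroupExample()
        .recoverAll("client-1", Arrays.asList("blk_1", "blk_2"));
    d.join();
  }
}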
Use of org.apache.hadoop.util.Daemon in the Apache Hadoop project.
Class OpenFileCtx, method waitForDump.
// Check whether we need to dump the new writes
private void waitForDump() {
  if (!enabledDump) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Do nothing, dump is disabled.");
    }
    return;
  }
  if (nonSequentialWriteInMemory.get() < DUMP_WRITE_WATER_MARK) {
    return;
  }
  // wake up the dumper thread to dump the data
  synchronized (this) {
    if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Asking dumper to dump...");
      }
      if (dumpThread == null) {
        dumpThread = new Daemon(new Dumper());
        dumpThread.start();
      } else {
        this.notifyAll();
      }
    }
    while (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
      try {
        this.wait();
      } catch (InterruptedException ignored) {
      }
    }
  }
}
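The method combines lazy start with backpressure: the first writer to cross the watermark creates and starts the dumper Daemon, while later writers just notifyAll() and then block until the dumper drains below the mark. A simplified, self-contained sketch of that handshake follows; the field and method names are illustrative, not OpenFileCtx's.

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.util.Daemon;

public class DaemonWatermarkExample {

  private static final long WATER_MARK = 1024;
  private final AtomicLong pending = new AtomicLong();
  private Daemon dumpThread;

  // Drains pending data whenever it reaches the watermark.
  private final class Dumper implements Runnable {
    @Override
    public void run() {
      while (true) {
        synchronized (DaemonWatermarkExample.this) {
          while (pending.get() < WATER_MARK) {
            try {
              DaemonWatermarkExample.this.wait(); // sleep until asked to dump
            } catch (InterruptedException e) {
              return;
            }
          }
        }
        pending.set(0); // stand-in for spilling the buffered writes to disk
        synchronized (DaemonWatermarkExample.this) {
          DaemonWatermarkExample.this.notifyAll(); // release blocked writers
        }
      }
    }
  }

  void write(long bytes) {
    if (pending.addAndGet(bytes) < WATER_MARK) {
      return; // below the watermark, nothing to do
    }
    synchronized (this) {
      if (dumpThread == null) {
        dumpThread = new Daemon(new Dumper()); // lazily started on first overflow
        dumpThread.start();
      } else {
        notifyAll(); // dumper already exists; just wake it
      }
      while (pending.get() >= WATER_MARK) {
        try {
          wait(); // backpressure: block this writer until the dumper catches up
        } catch (InterruptedException ignored) {
        }
      }
    }
  }

  public static void main(String[] args) {
    DaemonWatermarkExample ctx = new DaemonWatermarkExample();
    for (int i = 0; i < 10; i++) {
      ctx.write(256); // every fourth write crosses the mark and briefly blocks
    }
    System.out.println("all writes accepted"); // JVM exits; the dumper is a daemon
  }
}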