Use of org.apache.hadoop.io.MD5Hash in project hadoop by apache.
From the class Checkpointer, method doCheckpoint:
/**
* Create a new checkpoint
*/
void doCheckpoint() throws IOException {
  BackupImage bnImage = getFSImage();
  NNStorage bnStorage = bnImage.getStorage();
  long startTime = monotonicNow();
  bnImage.freezeNamespaceAtNextRoll();
  NamenodeCommand cmd =
      getRemoteNamenodeProxy().startCheckpoint(backupNode.getRegistration());
  CheckpointCommand cpCmd = null;
  switch (cmd.getAction()) {
    case NamenodeProtocol.ACT_SHUTDOWN:
      shutdown();
      throw new IOException("Name-node " + backupNode.nnRpcAddress
          + " requested shutdown.");
    case NamenodeProtocol.ACT_CHECKPOINT:
      cpCmd = (CheckpointCommand) cmd;
      break;
    default:
      throw new IOException("Unsupported NamenodeCommand: " + cmd.getAction());
  }
  bnImage.waitUntilNamespaceFrozen();
  CheckpointSignature sig = cpCmd.getSignature();
  // Make sure we're talking to the same NN!
  sig.validateStorageInfo(bnImage);
  long lastApplied = bnImage.getLastAppliedTxId();
  LOG.debug("Doing checkpoint. Last applied: " + lastApplied);
  RemoteEditLogManifest manifest = getRemoteNamenodeProxy()
      .getEditLogManifest(bnImage.getLastAppliedTxId() + 1);
  boolean needReloadImage = false;
  if (!manifest.getLogs().isEmpty()) {
    RemoteEditLog firstRemoteLog = manifest.getLogs().get(0);
    // Not enough logs to roll forward using only logs; we need
    // to download and load the image.
    if (firstRemoteLog.getStartTxId() > lastApplied + 1) {
      LOG.info("Unable to roll forward using only logs. Downloading "
          + "image with txid " + sig.mostRecentCheckpointTxId);
      MD5Hash downloadedHash = TransferFsImage.downloadImageToStorage(
          backupNode.nnHttpAddress, sig.mostRecentCheckpointTxId, bnStorage,
          true, false);
      bnImage.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE,
          sig.mostRecentCheckpointTxId, downloadedHash);
      lastApplied = sig.mostRecentCheckpointTxId;
      needReloadImage = true;
    }
    if (firstRemoteLog.getStartTxId() > lastApplied + 1) {
      throw new IOException("No logs to roll forward from " + lastApplied);
    }
    // get edits files
    for (RemoteEditLog log : manifest.getLogs()) {
      TransferFsImage.downloadEditsToStorage(backupNode.nnHttpAddress, log, bnStorage);
    }
    if (needReloadImage) {
      LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
      File file = bnStorage.findImageFile(NameNodeFile.IMAGE,
          sig.mostRecentCheckpointTxId);
      bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
    }
    rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
  }
  long txid = bnImage.getLastAppliedTxId();
  backupNode.namesystem.writeLock();
  try {
    backupNode.namesystem.setImageLoaded();
    if (backupNode.namesystem.getBlocksTotal() > 0) {
      long completeBlocksTotal = backupNode.namesystem.getCompleteBlocksTotal();
      backupNode.namesystem.getBlockManager().setBlockTotal(completeBlocksTotal);
    }
    bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
    if (!backupNode.namenode.isRollingUpgrade()) {
      bnImage.updateStorageVersion();
    }
  } finally {
    backupNode.namesystem.writeUnlock("doCheckpoint");
  }
  if (cpCmd.needToReturnImage()) {
    TransferFsImage.uploadImageFromStorage(backupNode.nnHttpAddress, conf,
        bnStorage, NameNodeFile.IMAGE, txid);
  }
  getRemoteNamenodeProxy().endCheckpoint(backupNode.getRegistration(), sig);
  if (backupNode.getRole() == NamenodeRole.BACKUP) {
    bnImage.convergeJournalSpool();
  }
  // keep registration up to date
  backupNode.setRegistration();
  long imageSize = bnImage.getStorage().getFsImageName(txid).length();
  LOG.info("Checkpoint completed in " + (monotonicNow() - startTime) / 1000
      + " seconds." + " New Image Size: " + imageSize);
}
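The MD5Hash returned by TransferFsImage.downloadImageToStorage above is the digest of the image bytes as they were written to local storage, and saveDigestAndRenameCheckpointImage persists it next to the image so later loads can detect corruption. As a minimal illustration of what such a value is, the sketch below hashes a local file with the MD5Hash.digest(InputStream) convenience overload and prints the hex form; the path is hypothetical and this is not part of the Checkpointer code.

  // Sketch only: compute an MD5Hash for a local file and print it.
  // The path is hypothetical; MD5Hash.toString() renders the hex digest.
  try (InputStream in = new FileInputStream("/tmp/fsimage.ckpt")) {
    MD5Hash hash = MD5Hash.digest(in);    // reads the stream to the end
    System.out.println("md5 = " + hash);  // 32-character hex string
  }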
Another use of org.apache.hadoop.io.MD5Hash, from the class BootstrapStandby, method downloadImage:
private int downloadImage(NNStorage storage, NamenodeProtocol proxy,
    RemoteNameNodeInfo proxyInfo) throws IOException {
  // Load the newly formatted image, using all of the directories
  // (including shared edits)
  final long imageTxId = proxy.getMostRecentCheckpointTxId();
  final long curTxId = proxy.getTransactionID();
  FSImage image = new FSImage(conf);
  try {
    image.getStorage().setStorageInfo(storage);
    image.initEditLog(StartupOption.REGULAR);
    assert image.getEditLog().isOpenForRead() :
        "Expected edit log to be open for read";
    // Ensure that enough edits are already available to
    // start up from the last checkpoint on the active.
    if (!skipSharedEditsCheck
        && !checkLogsAvailableForRead(image, imageTxId, curTxId)) {
      return ERR_CODE_LOGS_UNAVAILABLE;
    }
    // Download that checkpoint into our storage directories.
    MD5Hash hash = TransferFsImage.downloadImageToStorage(
        proxyInfo.getHttpAddress(), imageTxId, storage, true, true);
    image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId, hash);
    // Write seen_txid to the formatted image directories.
    storage.writeTransactionIdFileToStorage(imageTxId, NameNodeDirType.IMAGE);
  } catch (IOException ioe) {
    throw ioe;
  } finally {
    image.close();
  }
  return 0;
}
Next, from the class OfflineImageReconstructor, method run:
/**
* Run the OfflineImageReconstructor.
*
* @param inputPath The input path to use.
* @param outputPath The output path to use.
*
* @throws Exception On error.
*/
public static void run(String inputPath, String outputPath) throws Exception {
  MessageDigest digester = MD5Hash.getDigester();
  FileOutputStream fout = null;
  File foutHash = new File(outputPath + ".md5");
  // delete any .md5 file that exists
  Files.deleteIfExists(foutHash.toPath());
  CountingOutputStream out = null;
  FileInputStream fis = null;
  InputStreamReader reader = null;
  try {
    Files.deleteIfExists(Paths.get(outputPath));
    fout = new FileOutputStream(outputPath);
    fis = new FileInputStream(inputPath);
    reader = new InputStreamReader(fis, Charset.forName("UTF-8"));
    out = new CountingOutputStream(
        new DigestOutputStream(new BufferedOutputStream(fout), digester));
    OfflineImageReconstructor oir = new OfflineImageReconstructor(out, reader);
    oir.processXml();
  } finally {
    IOUtils.cleanup(LOG, reader, fis, out, fout);
  }
  // Write the md5 file
  MD5FileUtils.saveMD5File(new File(outputPath), new MD5Hash(digester.digest()));
}
Next, from the class MD5FileUtils, method computeMd5ForFile:
/**
* Read dataFile and compute its MD5 checksum.
*/
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest digester = MD5Hash.getDigester();
    DigestInputStream dis = new DigestInputStream(in, digester);
    IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128 * 1024);
    return new MD5Hash(digester.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
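Usage is a single call; the returned MD5Hash prints as the hex digest and can be compared with equals(). For example, with a hypothetical path:

  File dataFile = new File("/tmp/fsimage_0000000000000000042");  // hypothetical
  MD5Hash md5 = MD5FileUtils.computeMd5ForFile(dataFile);
  System.out.println(dataFile + " md5 = " + md5);  // 32-character hex digest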
Finally, from the class TestStartup, method corruptFSImageMD5:
/**
* Corrupts the MD5 sum of the fsimage.
*
* @param corruptAll
* whether to corrupt one or all of the MD5 sums in the configured
* namedirs
* @throws IOException
*/
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>) FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri : nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    // make sure we got right dir
    assertEquals(dfsDir.getName(), "dfs");
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/" + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
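Because new MD5Hash(new byte[16]) is the all-zero digest, the saved .md5 sidecar no longer matches the image it describes, so a subsequent image load should fail its checksum verification. A hedged sketch of how a test might assert that mismatch directly; it assumes MD5FileUtils also provides readStoredMd5ForFile, and imageFile refers to the file whose sidecar was just overwritten:

  // Sketch: the stored digest (all zeros) should differ from the digest
  // recomputed from the image bytes. readStoredMd5ForFile is assumed to exist.
  MD5Hash stored = MD5FileUtils.readStoredMd5ForFile(imageFile);
  MD5Hash actual = MD5FileUtils.computeMd5ForFile(imageFile);
  assertFalse("corrupted .md5 sidecar should not match the image",
      actual.equals(stored));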