Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLog in project hadoop by apache.
In the class FileJournalManager, method getRemoteEditLogs:
/**
* Find all edit log segments starting at or above the given txid.
* @param firstTxId the transaction ID from which to start looking
* @param inProgressOk whether or not to include the in-progress edit log
* segment
* @return a list of remote edit logs
* @throws IOException if edit logs cannot be listed.
*/
public List<RemoteEditLog> getRemoteEditLogs(long firstTxId, boolean inProgressOk) throws IOException {
  File currentDir = sd.getCurrentDir();
  List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
  List<RemoteEditLog> ret = Lists.newArrayListWithCapacity(allLogFiles.size());
  for (EditLogFile elf : allLogFiles) {
    if (elf.hasCorruptHeader() || (!inProgressOk && elf.isInProgress())) {
      continue;
    }
    if (elf.isInProgress()) {
      try {
        elf.scanLog(getLastReadableTxId(), true);
      } catch (IOException e) {
        LOG.error("got IOException while trying to validate header of " + elf + ". Skipping.", e);
        continue;
      }
    }
    if (elf.getFirstTxId() >= firstTxId) {
      ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId, elf.isInProgress()));
    } else if (elf.getFirstTxId() < firstTxId && firstTxId <= elf.getLastTxId()) {
      // firstTxId falls in the middle of this edit log segment; return it
      // anyway and let the caller decide whether it wants to use it.
      ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId, elf.isInProgress()));
    }
  }
  Collections.sort(ret);
  return ret;
}
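A minimal sketch of how a caller might consume this method, assuming an already-constructed FileJournalManager instance named journalManager; the starting txid of 100 is purely illustrative.

// Hypothetical caller: list all finalized segments at or above txid 100.
List<RemoteEditLog> segments = journalManager.getRemoteEditLogs(100L, false);
for (RemoteEditLog segment : segments) {
  System.out.println("segment [" + segment.getStartTxId() + ", " + segment.getEndTxId()
      + "] inProgress=" + segment.isInProgress());
}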
Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLog in project hadoop by apache.
In the class TestPBHelper, method testConvertRemoteEditLogManifest:
@Test
public void testConvertRemoteEditLogManifest() {
  List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
  logs.add(new RemoteEditLog(1, 10));
  logs.add(new RemoteEditLog(11, 20));
  convertAndCheckRemoteEditLogManifest(new RemoteEditLogManifest(logs, 20), logs, 20);
  convertAndCheckRemoteEditLogManifest(new RemoteEditLogManifest(logs), logs, HdfsServerConstants.INVALID_TXID);
}
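For context, a hedged sketch of what the convertAndCheckRemoteEditLogManifest helper presumably does: convert the manifest to its protobuf form and back, then compare fields. The proto type name and the assertion style are assumptions, not taken from the source.

// Sketch only: round-trip through PBHelper and verify the result.
private void convertAndCheckRemoteEditLogManifest(RemoteEditLogManifest m,
    List<RemoteEditLog> logs, long committedTxnId) {
  RemoteEditLogManifestProto proto = PBHelper.convert(m);  // assumed proto type name
  RemoteEditLogManifest converted = PBHelper.convert(proto);
  assertEquals(logs, converted.getLogs());
  assertEquals(committedTxnId, converted.getCommittedTxnId());
}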
Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLog in project hadoop by apache.
In the class Checkpointer, method doCheckpoint:
/**
 * Create a new checkpoint.
 */
void doCheckpoint() throws IOException {
  BackupImage bnImage = getFSImage();
  NNStorage bnStorage = bnImage.getStorage();
  long startTime = monotonicNow();
  bnImage.freezeNamespaceAtNextRoll();
  NamenodeCommand cmd = getRemoteNamenodeProxy().startCheckpoint(backupNode.getRegistration());
  CheckpointCommand cpCmd = null;
  switch (cmd.getAction()) {
    case NamenodeProtocol.ACT_SHUTDOWN:
      shutdown();
      throw new IOException("Name-node " + backupNode.nnRpcAddress + " requested shutdown.");
    case NamenodeProtocol.ACT_CHECKPOINT:
      cpCmd = (CheckpointCommand) cmd;
      break;
    default:
      throw new IOException("Unsupported NamenodeCommand: " + cmd.getAction());
  }
  bnImage.waitUntilNamespaceFrozen();
  CheckpointSignature sig = cpCmd.getSignature();
  // Make sure we're talking to the same NN!
  sig.validateStorageInfo(bnImage);
  long lastApplied = bnImage.getLastAppliedTxId();
  LOG.debug("Doing checkpoint. Last applied: " + lastApplied);
  RemoteEditLogManifest manifest = getRemoteNamenodeProxy().getEditLogManifest(bnImage.getLastAppliedTxId() + 1);
  boolean needReloadImage = false;
  if (!manifest.getLogs().isEmpty()) {
    RemoteEditLog firstRemoteLog = manifest.getLogs().get(0);
    // If we don't have enough logs to roll forward using only logs, we need
    // to download and load the image.
    if (firstRemoteLog.getStartTxId() > lastApplied + 1) {
      LOG.info("Unable to roll forward using only logs. Downloading image with txid " + sig.mostRecentCheckpointTxId);
      MD5Hash downloadedHash = TransferFsImage.downloadImageToStorage(backupNode.nnHttpAddress, sig.mostRecentCheckpointTxId, bnStorage, true, false);
      bnImage.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, sig.mostRecentCheckpointTxId, downloadedHash);
      lastApplied = sig.mostRecentCheckpointTxId;
      needReloadImage = true;
    }
    if (firstRemoteLog.getStartTxId() > lastApplied + 1) {
      throw new IOException("No logs to roll forward from " + lastApplied);
    }
    // get edits files
    for (RemoteEditLog log : manifest.getLogs()) {
      TransferFsImage.downloadEditsToStorage(backupNode.nnHttpAddress, log, bnStorage);
    }
    if (needReloadImage) {
      LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
      File file = bnStorage.findImageFile(NameNodeFile.IMAGE, sig.mostRecentCheckpointTxId);
      bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
    }
    rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
  }
  long txid = bnImage.getLastAppliedTxId();
  backupNode.namesystem.writeLock();
  try {
    backupNode.namesystem.setImageLoaded();
    if (backupNode.namesystem.getBlocksTotal() > 0) {
      long completeBlocksTotal = backupNode.namesystem.getCompleteBlocksTotal();
      backupNode.namesystem.getBlockManager().setBlockTotal(completeBlocksTotal);
    }
    bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
    if (!backupNode.namenode.isRollingUpgrade()) {
      bnImage.updateStorageVersion();
    }
  } finally {
    backupNode.namesystem.writeUnlock("doCheckpoint");
  }
  if (cpCmd.needToReturnImage()) {
    TransferFsImage.uploadImageFromStorage(backupNode.nnHttpAddress, conf, bnStorage, NameNodeFile.IMAGE, txid);
  }
  getRemoteNamenodeProxy().endCheckpoint(backupNode.getRegistration(), sig);
  if (backupNode.getRole() == NamenodeRole.BACKUP) {
    bnImage.convergeJournalSpool();
  }
  // keep registration up to date
  backupNode.setRegistration();
  long imageSize = bnImage.getStorage().getFsImageName(txid).length();
  LOG.info("Checkpoint completed in " + (monotonicNow() - startTime) / 1000 + " seconds." + " New Image Size: " + imageSize);
}
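The pivotal decision above is whether the manifest's first segment connects to the backup node's last applied transaction. A condensed, hedged restatement of that check, with simplified names that are not part of the Hadoop API:

// Sketch: the image must be re-downloaded when the first available edit
// segment starts beyond lastApplied + 1, i.e. edits alone cannot bridge the gap.
static boolean mustDownloadImage(long firstSegmentStartTxId, long lastAppliedTxId) {
  return firstSegmentStartTxId > lastAppliedTxId + 1;
}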
Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLog in project hadoop by apache.
In the class Checkpointer, method rollForwardByApplyingLogs:
static void rollForwardByApplyingLogs(RemoteEditLogManifest manifest, FSImage dstImage, FSNamesystem dstNamesystem) throws IOException {
  NNStorage dstStorage = dstImage.getStorage();
  List<EditLogInputStream> editsStreams = Lists.newArrayList();
  for (RemoteEditLog log : manifest.getLogs()) {
    if (log.getEndTxId() > dstImage.getLastAppliedTxId()) {
      File f = dstStorage.findFinalizedEditsFile(log.getStartTxId(), log.getEndTxId());
      editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(), log.getEndTxId(), true));
    }
  }
  LOG.info("Checkpointer about to load edits from " + editsStreams.size() + " stream(s).");
  dstImage.loadEdits(editsStreams, dstNamesystem);
}
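Only segments whose end txid goes past the image's last applied transaction contribute anything new. A minimal sketch of that filter in isolation, with an illustrative helper name that does not exist in Hadoop:

// Sketch: keep only the manifest segments that still need to be applied.
static List<RemoteEditLog> segmentsToApply(List<RemoteEditLog> logs, long lastAppliedTxId) {
  List<RemoteEditLog> toApply = new ArrayList<RemoteEditLog>();
  for (RemoteEditLog log : logs) {
    if (log.getEndTxId() > lastAppliedTxId) {
      toApply.add(log);
    }
  }
  return toApply;
}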
Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLog in project hadoop by apache.
In the class QuorumJournalManager, method selectInputStreams:
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams, long fromTxnId, boolean inProgressOk, boolean onlyDurableTxns) throws IOException {
  QuorumCall<AsyncLogger, RemoteEditLogManifest> q = loggers.getEditLogManifest(fromTxnId, inProgressOk);
  Map<AsyncLogger, RemoteEditLogManifest> resps = loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs, "selectInputStreams");
  LOG.debug("selectInputStream manifests:\n" + Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    long committedTxnId = manifest.getCommittedTxnId();
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());
      long endTxId = remoteLog.getEndTxId();
      // If only durable transactions are wanted, endTxId must not be larger
      // than committedTxnId; this ensures consistency across loggers.
      if (onlyDurableTxns && inProgressOk) {
        endTxId = Math.min(endTxId, committedTxnId);
      }
      EditLogInputStream elis = EditLogFileInputStream.fromUrl(connectionFactory, url, remoteLog.getStartTxId(), endTxId, remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
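The subtle step above is capping endTxId at the quorum's committed txid. A hedged, isolated restatement of that rule, with simplified names that are not part of the QJM API:

// Sketch: when only durable transactions are wanted and in-progress
// segments are allowed, never read past what a quorum has committed.
static long effectiveEndTxId(long segmentEndTxId, long committedTxnId,
    boolean onlyDurableTxns, boolean inProgressOk) {
  if (onlyDurableTxns && inProgressOk) {
    return Math.min(segmentEndTxId, committedTxnId);
  }
  return segmentEndTxId;
}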