Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class FSImageTestUtil, method findLatestEditsLog.
/**
 * @return the latest edits log, finalized or otherwise, from the given
 * storage directory.
 */
public static EditLogFile findLatestEditsLog(StorageDirectory sd)
    throws IOException {
  File currentDir = sd.getCurrentDir();
  List<EditLogFile> foundEditLogs =
      Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir));
  return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID);
}
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class TestCheckpoint, method testSecondaryPurgesEditLogs.
/**
 * Regression test for HDFS-3678 "Edit log files are never being purged from 2NN"
 */
@Test
public void testSecondaryPurgesEditLogs() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/foo"));
    secondary = startSecondaryNameNode(conf);
    // Checkpoint a few times. Each checkpoint causes a log roll, and thus
    // several edit log segments on the 2NN.
    for (int i = 0; i < 5; i++) {
      secondary.doCheckpoint();
    }
    // Make sure there are no more edit log files than there should be.
    List<File> checkpointDirs = getCheckpointCurrentDirs(secondary);
    for (File checkpointDir : checkpointDirs) {
      List<EditLogFile> editsFiles =
          FileJournalManager.matchEditLogs(checkpointDir);
      assertEquals("Edit log files were not purged from 2NN", 1,
          editsFiles.size());
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
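The assertion holds because DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY is set to zero, so every checkpoint purges all but the newest segment. To see what matchEditLogs actually returns for a directory, a small listing sketch (illustrative only; "currentDir" stands for any edits "current" directory):

// Sketch: enumerate the segments found under a "current" directory,
// oldest first, printing each segment's transaction range.
List<EditLogFile> logs =
    Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir));
Collections.sort(logs, EditLogFile.COMPARE_BY_START_TXID);
for (EditLogFile elf : logs) {
  System.out.println(elf.getFirstTxId() + "-" + elf.getLastTxId()
      + (elf.isInProgress() ? " (in progress)" : ""));
}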
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class Journal, method startLogSegment.
/**
 * Start a new segment at the given txid. The previous segment
 * must have already been finalized.
 */
public synchronized void startLogSegment(RequestInfo reqInfo, long txid,
    int layoutVersion) throws IOException {
  assert fjm != null;
  checkFormatted();
  checkRequest(reqInfo);
  if (curSegment != null) {
    LOG.warn("Client is requesting a new log segment " + txid +
        " though we are already writing " + curSegment + ". " +
        "Aborting the current segment in order to begin the new one.");
    // The writer may have lost a connection to us and is now
    // re-connecting after the connection came back.
    // We should abort our own old segment.
    abortCurSegment();
  }
  // Paranoid sanity check: we should never overwrite a finalized log file.
  // Additionally, if it's in-progress, it should have at most 1 transaction.
  // This can happen if the writer crashes exactly at the start of a segment.
  EditLogFile existing = fjm.getLogFile(txid);
  if (existing != null) {
    if (!existing.isInProgress()) {
      throw new IllegalStateException("Already have a finalized segment " +
          existing + " beginning at " + txid);
    }
    // If it's in-progress, it should only contain one transaction,
    // because the "startLogSegment" transaction is written alone at the
    // start of each segment.
    existing.scanLog(Long.MAX_VALUE, false);
    if (existing.getLastTxId() != existing.getFirstTxId()) {
      throw new IllegalStateException("The log file " + existing +
          " seems to contain valid transactions");
    }
  }
  long curLastWriterEpoch = lastWriterEpoch.get();
  if (curLastWriterEpoch != reqInfo.getEpoch()) {
    LOG.info("Updating lastWriterEpoch from " + curLastWriterEpoch +
        " to " + reqInfo.getEpoch() + " for client " +
        Server.getRemoteIp());
    lastWriterEpoch.set(reqInfo.getEpoch());
  }
  // The fact that we are starting a segment at this txid indicates
  // that any previous recovery for this same segment was aborted.
  // Otherwise, no writer would have started writing. So, we can
  // remove the record of the older segment here.
  purgePaxosDecision(txid);
  curSegment = fjm.startLogSegment(txid, layoutVersion);
  curSegmentTxId = txid;
  nextTxId = txid;
}
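For orientation, a caller-side sketch of opening the very first segment; this is an assumption-heavy illustration, not repository code. The journal id string, the serial/committed-txid values, and the four-argument RequestInfo constructor are all assumed here:

// Sketch: a writer that has already won epoch 1 via newEpoch() opens
// a segment at txid 1. Constructor arguments are assumed.
RequestInfo reqInfo = new RequestInfo("myjournal" /* jid, placeholder */,
    1L /* epoch */, 1L /* ipcSerialNumber */, 0L /* committedTxId */);
journal.startLogSegment(reqInfo, 1,
    NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);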
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class Journal, method getSegmentInfo.
/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId) throws IOException {
  EditLogFile elf = fjm.getLogFile(segmentTxId);
  if (elf == null) {
    return null;
  }
  if (elf.isInProgress()) {
    elf.scanLog(Long.MAX_VALUE, false);
  }
  if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
    LOG.info("Edit log file " + elf + " appears to be empty. " +
        "Moving it aside...");
    elf.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto ret = SegmentStateProto.newBuilder()
      .setStartTxId(segmentTxId)
      .setEndTxId(elf.getLastTxId())
      .setIsInProgress(elf.isInProgress())
      .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
      TextFormat.shortDebugString(ret));
  return ret;
}
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class Journal, method newEpoch.
/**
 * Try to create a new epoch for this journal.
 * @param nsInfo the namespace, which is verified for consistency or used to
 * format, if the Journal has not yet been written to.
 * @param epoch the epoch to start
 * @return the status information necessary to begin recovery
 * @throws IOException if the node has already made a promise to another
 * writer with a higher epoch number, if the namespace is inconsistent,
 * or if a disk error occurs.
 */
synchronized NewEpochResponseProto newEpoch(NamespaceInfo nsInfo, long epoch)
    throws IOException {
  checkFormatted();
  storage.checkConsistentNamespace(nsInfo);
  // Check that the proposed epoch is greater than any other that
  // we've promised.
  if (epoch <= getLastPromisedEpoch()) {
    throw new IOException("Proposed epoch " + epoch + " <= last promise " +
        getLastPromisedEpoch());
  }
  updateLastPromisedEpoch(epoch);
  abortCurSegment();
  NewEpochResponseProto.Builder builder = NewEpochResponseProto.newBuilder();
  EditLogFile latestFile = scanStorageForLatestEdits();
  if (latestFile != null) {
    builder.setLastSegmentTxId(latestFile.getFirstTxId());
  }
  return builder.build();
}