Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class TestJournalNodeSync, method deleteEditLog.
private File deleteEditLog(File currentDir, long startTxId) throws IOException {
  EditLogFile logFile = getLogFile(currentDir, startTxId);
  // An in-progress segment is still being written; roll the edit log
  // until the segment starting at startTxId has been finalized.
  while (logFile.isInProgress()) {
    dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
    logFile = getLogFile(currentDir, startTxId);
  }
  File deleteFile = logFile.getFile();
  Assert.assertTrue("Couldn't delete edit log file", deleteFile.delete());
  return deleteFile;
}
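A hedged, standalone sketch of the lookup the helper above depends on: FileJournalManager.getLogFile(dir, startTxId) returns the EditLogFile whose segment starts at the given txid, or null if no such segment exists. The class name, command-line arguments, directory path, and printed output below are illustrative assumptions, not part of the Hadoop sources.

import java.io.File;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;

public class EditLogFileLookup {
  public static void main(String[] args) throws Exception {
    // e.g. a JournalNode's .../journal/<nameservice>/current directory (assumed)
    File currentDir = new File(args[0]);
    long startTxId = Long.parseLong(args[1]);
    EditLogFile elf = FileJournalManager.getLogFile(currentDir, startTxId);
    if (elf == null) {
      System.out.println("No segment starts at txid " + startTxId);
    } else {
      // An in-progress segment is still being written and has no final end txid
      System.out.println(elf.getFile() + " inProgress=" + elf.isInProgress());
    }
  }
}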
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class TestQuorumJournalManager, method checkRecovery.
private void checkRecovery(MiniJournalCluster cluster, long segmentTxId, long expectedEndTxId) throws IOException {
  int numFinalized = 0;
  for (int i = 0; i < cluster.getNumNodes(); i++) {
    File logDir = cluster.getCurrentDir(i, JID);
    EditLogFile elf = FileJournalManager.getLogFile(logDir, segmentTxId);
    if (elf == null) {
      continue;
    }
    if (!elf.isInProgress()) {
      numFinalized++;
      if (elf.getLastTxId() != expectedEndTxId) {
        fail("File " + elf + " finalized to wrong txid, expected " + expectedEndTxId);
      }
    }
  }
  if (numFinalized < cluster.getQuorumSize()) {
    fail("Did not find a quorum of finalized logs starting at " + segmentTxId);
  }
}
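The final check counts finalized copies of the segment across journal nodes and requires a quorum. A small sketch of the majority arithmetic this relies on, assuming getQuorumSize() computes a simple majority (the class and method names here are hypothetical):

public class QuorumMath {
  // Hypothetical helper: with N journal nodes, a majority quorum is floor(N/2) + 1
  static int majorityQuorum(int numJournalNodes) {
    return numJournalNodes / 2 + 1;
  }

  public static void main(String[] args) {
    System.out.println(majorityQuorum(3));  // 2: two finalized copies suffice
    System.out.println(majorityQuorum(5));  // 3
  }
}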
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class TestNNStorageRetentionManager, method runTest.
private void runTest(TestCaseDescription tc) throws IOException {
  StoragePurger mockPurger = Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
  ArgumentCaptor<FSImageFile> imagesPurgedCaptor = ArgumentCaptor.forClass(FSImageFile.class);
  ArgumentCaptor<EditLogFile> logsPurgedCaptor = ArgumentCaptor.forClass(EditLogFile.class);
  // Ask the manager to purge files we don't need any more
  new NNStorageRetentionManager(conf, tc.mockStorage(), tc.mockEditLog(mockPurger), mockPurger).purgeOldStorage(NameNodeFile.IMAGE);
  // Verify that it asked the purger to remove the correct files
  Mockito.verify(mockPurger, Mockito.atLeast(0)).purgeImage(imagesPurgedCaptor.capture());
  Mockito.verify(mockPurger, Mockito.atLeast(0)).purgeLog(logsPurgedCaptor.capture());
  // Check images
  Set<String> purgedPaths = Sets.newLinkedHashSet();
  for (FSImageFile purged : imagesPurgedCaptor.getAllValues()) {
    purgedPaths.add(fileToPath(purged.getFile()));
  }
  Assert.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)), Joiner.on(",").join(purgedPaths));
  // Check edit logs
  purgedPaths.clear();
  for (EditLogFile purged : logsPurgedCaptor.getAllValues()) {
    purgedPaths.add(fileToPath(purged.getFile()));
  }
  Assert.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)), Joiner.on(",").join(purgedPaths));
}
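The test drives everything through Mockito's ArgumentCaptor: atLeast(0) verifies without imposing a minimum call count, while capture() records every argument the mock received. A minimal, self-contained sketch of that pattern (the Purger interface and the paths here are hypothetical, not Hadoop types):

import java.util.List;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

interface Purger {
  void purge(String path);
}

public class CaptorSketch {
  public static void main(String[] args) {
    Purger mock = Mockito.mock(Purger.class);
    mock.purge("/tmp/a");
    mock.purge("/tmp/b");
    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    // atLeast(0) never fails on call count; it just lets the captor see each call
    Mockito.verify(mock, Mockito.atLeast(0)).purge(captor.capture());
    List<String> purged = captor.getAllValues();
    System.out.println(purged);  // [/tmp/a, /tmp/b]
  }
}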
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class GetJournalEditServlet, method doGet.
@Override
public void doGet(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException {
  FileInputStream editFileIn = null;
  try {
    final ServletContext context = getServletContext();
    final Configuration conf = (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
    final String journalId = request.getParameter(JOURNAL_ID_PARAM);
    QuorumJournalManager.checkJournalId(journalId);
    final JNStorage storage = JournalNodeHttpServer.getJournalFromContext(context, journalId).getStorage();
    // Check security
    if (!checkRequestorOrSendError(conf, request, response)) {
      return;
    }
    // Check that the namespace info is correct
    if (!checkStorageInfoOrSendError(storage, request, response)) {
      return;
    }
    long segmentTxId = ServletUtil.parseLongParam(request, SEGMENT_TXID_PARAM);
    FileJournalManager fjm = storage.getJournalManager();
    File editFile;
    synchronized (fjm) {
      // Synchronize on the FJM so that the file doesn't get finalized
      // out from underneath us while we're in the process of opening
      // it up.
      EditLogFile elf = fjm.getLogFile(segmentTxId);
      if (elf == null) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND, "No edit log found starting at txid " + segmentTxId);
        return;
      }
      editFile = elf.getFile();
      ImageServlet.setVerificationHeadersForGet(response, editFile);
      ImageServlet.setFileNameHeaders(response, editFile);
      editFileIn = new FileInputStream(editFile);
    }
    DataTransferThrottler throttler = ImageServlet.getThrottler(conf);
    // send edits
    TransferFsImage.copyFileToStream(response.getOutputStream(), editFile, editFileIn, throttler);
  } catch (Throwable t) {
    String errMsg = "getedit failed. " + StringUtils.stringifyException(t);
    response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, errMsg);
    throw new IOException(errMsg);
  } finally {
    IOUtils.closeStream(editFileIn);
  }
}
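From a client's perspective, this servlet is a plain HTTP GET. A hedged sketch of fetching one segment follows; the /getJournal path, the "jid" and "segmentTxId" parameter names, host, and port are assumptions here, and the storage-info parameter the servlet validates is omitted, so treat this as illustrative rather than a drop-in client.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class FetchEditSegment {
  public static void main(String[] args) throws Exception {
    // Assumed URL shape; a real request must also carry the storage-info parameter
    URL url = new URL("http://jn-host:8480/getJournal?jid=myjournal&segmentTxId=1");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
      long total = 0;
      try (InputStream in = conn.getInputStream()) {
        byte[] buf = new byte[8192];
        for (int n; (n = in.read(buf)) != -1; ) {
          total += n;  // a real client would stream this into a local segment file
        }
      }
      System.out.println("Read " + total + " bytes of edits");
    } else {
      System.out.println("HTTP " + conn.getResponseCode());
    }
  }
}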
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
The class Journal, method scanStorageForLatestEdits.
/**
 * Scan the local storage directory, and return the segment containing
 * the highest transaction.
 * @return the EditLogFile containing the highest transaction, or null
 * if no files exist.
 */
private synchronized EditLogFile scanStorageForLatestEdits() throws IOException {
  if (!fjm.getStorageDirectory().getCurrentDir().exists()) {
    return null;
  }
  LOG.info("Scanning storage " + fjm);
  List<EditLogFile> files = fjm.getLogFiles(0);
  while (!files.isEmpty()) {
    EditLogFile latestLog = files.remove(files.size() - 1);
    latestLog.scanLog(Long.MAX_VALUE, false);
    LOG.info("Latest log is " + latestLog);
    if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
      // the log contains no transactions
      LOG.warn("Latest log " + latestLog + " has no transactions. Moving it aside and looking for previous log");
      latestLog.moveAsideEmptyFile();
    } else {
      return latestLog;
    }
  }
  LOG.info("No files in " + fjm);
  return null;
}
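The loop walks the sorted segment list from the end, discarding empty segments until it finds one with real transactions. The same pattern in miniature, with a hypothetical Segment type standing in for EditLogFile and -1 standing in for HdfsServerConstants.INVALID_TXID:

import java.util.ArrayList;
import java.util.List;

public class LatestSegmentScan {
  static final long INVALID_TXID = -1;  // models HdfsServerConstants.INVALID_TXID

  static class Segment {
    final long lastTxId;
    Segment(long lastTxId) { this.lastTxId = lastTxId; }
  }

  // Assumes the input is sorted by start txid, lowest first
  static Segment latestNonEmpty(List<Segment> sorted) {
    List<Segment> files = new ArrayList<>(sorted);
    while (!files.isEmpty()) {
      Segment latest = files.remove(files.size() - 1);
      if (latest.lastTxId != INVALID_TXID) {
        return latest;  // highest segment that actually contains transactions
      }
      // empty: skip it (the real code also moves the empty file aside on disk)
    }
    return null;
  }

  public static void main(String[] args) {
    List<Segment> segs = List.of(new Segment(100), new Segment(250), new Segment(INVALID_TXID));
    System.out.println(latestNonEmpty(segs).lastTxId);  // 250
  }
}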