Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager in project hadoop by apache.

The doGet method of the class GetJournalEditServlet. It serves an edit log segment from a JournalNode over HTTP: after validating the requestor and the namespace storage info, it asks the journal's FileJournalManager for the segment that starts at the requested transaction id and streams the file back, with optional bandwidth throttling.
@Override
public void doGet(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  FileInputStream editFileIn = null;
  try {
    final ServletContext context = getServletContext();
    final Configuration conf = (Configuration) context
        .getAttribute(JspHelper.CURRENT_CONF);
    final String journalId = request.getParameter(JOURNAL_ID_PARAM);
    QuorumJournalManager.checkJournalId(journalId);
    final JNStorage storage = JournalNodeHttpServer
        .getJournalFromContext(context, journalId).getStorage();

    // Check security
    if (!checkRequestorOrSendError(conf, request, response)) {
      return;
    }

    // Check that the namespace info is correct
    if (!checkStorageInfoOrSendError(storage, request, response)) {
      return;
    }

    long segmentTxId = ServletUtil.parseLongParam(request,
        SEGMENT_TXID_PARAM);
    FileJournalManager fjm = storage.getJournalManager();
    File editFile;

    synchronized (fjm) {
      // Synchronize on the FJM so that the file doesn't get finalized
      // out from underneath us while we're in the process of opening
      // it up.
      EditLogFile elf = fjm.getLogFile(segmentTxId);
      if (elf == null) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND,
            "No edit log found starting at txid " + segmentTxId);
        return;
      }
      editFile = elf.getFile();
      ImageServlet.setVerificationHeadersForGet(response, editFile);
      ImageServlet.setFileNameHeaders(response, editFile);
      editFileIn = new FileInputStream(editFile);
    }
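
    // The segment file is opened while the FJM lock is held, but the bytes
    // are streamed after it is released, so a slow reader cannot stall
    // segment finalization. The throttler below caps transfer bandwidth;
    // depending on configuration it may be null (unthrottled).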
    DataTransferThrottler throttler = ImageServlet.getThrottler(conf);

    // send edits
    TransferFsImage.copyFileToStream(response.getOutputStream(), editFile,
        editFileIn, throttler);
  } catch (Throwable t) {
    String errMsg = "getedit failed. " + StringUtils.stringifyException(t);
    response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, errMsg);
    throw new IOException(errMsg);
  } finally {
    IOUtils.closeStream(editFileIn);
  }
}
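
The FileJournalManager call at the heart of this servlet is fjm.getLogFile(segmentTxId), which resolves a start transaction id to a concrete segment file in the journal's storage directory. As a rough sketch of what that lookup involves, the illustrative code below scans a directory for HDFS's segment file names (finalized segments are named edits_<firstTxId>-<lastTxId>, in-progress ones edits_inprogress_<firstTxId>); it is not Hadoop's implementation, and the class and method names here are hypothetical.

import java.io.File;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative stand-in for fjm.getLogFile(segmentTxId): scan a journal
// storage directory for the segment whose first txid matches the request.
public class FindEditSegment {
  private static final Pattern FINALIZED =
      Pattern.compile("edits_(\\d+)-(\\d+)");
  private static final Pattern IN_PROGRESS =
      Pattern.compile("edits_inprogress_(\\d+)");

  static File findSegment(File currentDir, long segmentTxId) {
    File[] files = currentDir.listFiles();
    if (files == null) {
      return null;
    }
    for (File f : files) {
      Matcher m = FINALIZED.matcher(f.getName());
      if (m.matches() && Long.parseLong(m.group(1)) == segmentTxId) {
        return f; // finalized segment starting at segmentTxId
      }
      m = IN_PROGRESS.matcher(f.getName());
      if (m.matches() && Long.parseLong(m.group(1)) == segmentTxId) {
        return f; // segment still being written
      }
    }
    return null; // the servlet maps this case to HTTP 404
  }
}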
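On the consuming side, a reader fetches the segment from this servlet over HTTP. The sketch below assumes the servlet is mounted at /getJournal and that JOURNAL_ID_PARAM, SEGMENT_TXID_PARAM, and the storage-info parameter checked by checkStorageInfoOrSendError are named jid, segmentTxId, and storageInfo; those names, the host, and the journal id are illustrative assumptions, and in Hadoop the actual consumer is the QuorumJournalManager edit-tailing machinery rather than hand-rolled HTTP code.

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

// Hypothetical client: fetch the edit segment starting at a given txid
// from a JournalNode's HTTP server and save it to a local file.
public class FetchEditSegment {
  public static void main(String[] args) throws IOException {
    String jnHttpAddr = "jn1.example.com:8480";  // hypothetical JournalNode
    String journalId = "mycluster";              // hypothetical journal id
    // Placeholder; must match the journal's namespace storage info or the
    // servlet's checkStorageInfoOrSendError will reject the request.
    String storageInfo = "<namespace-storage-info>";
    long segmentTxId = 1L;

    URL url = new URL("http://" + jnHttpAddr + "/getJournal"
        + "?jid=" + URLEncoder.encode(journalId, StandardCharsets.UTF_8.name())
        + "&segmentTxId=" + segmentTxId
        + "&storageInfo="
        + URLEncoder.encode(storageInfo, StandardCharsets.UTF_8.name()));
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
      throw new IOException("getedit failed: HTTP " + conn.getResponseCode());
    }
    try (InputStream in = conn.getInputStream()) {
      Files.copy(in, Paths.get("edits-" + segmentTxId),
          StandardCopyOption.REPLACE_EXISTING);
    }
  }
}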