Example 6 with EditLogFile

Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.

From class TestJournalNodeSync, method deleteEditLog:

private File deleteEditLog(File currentDir, long startTxId) throws IOException {
    EditLogFile logFile = getLogFile(currentDir, startTxId);
    while (logFile.isInProgress()) {
        dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
        logFile = getLogFile(currentDir, startTxId);
    }
    File deleteFile = logFile.getFile();
    Assert.assertTrue("Couldn't delete edit log file", deleteFile.delete());
    return deleteFile;
}
Also used : EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) File(java.io.File) FileJournalManager.getLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile)
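
For comparison, a sync test can simulate corruption rather than deletion by truncating the segment in place. The sketch below is illustrative and not taken from the Hadoop sources; it assumes the segment is already finalized, reuses the same statically imported getLogFile helper, and requires java.io.RandomAccessFile:

private File truncateEditLog(File currentDir, long startTxId) throws IOException {
    // Locate the segment the same way deleteEditLog does above.
    EditLogFile logFile = getLogFile(currentDir, startTxId);
    File editFile = logFile.getFile();
    try (RandomAccessFile raf = new RandomAccessFile(editFile, "rw")) {
        // Keep only the first half of the file, so the sync logic sees a
        // truncated segment rather than a missing one.
        raf.setLength(raf.length() / 2);
    }
    return editFile;
}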

Example 7 with EditLogFile

Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.

From class TestQuorumJournalManager, method checkRecovery:

private void checkRecovery(MiniJournalCluster cluster, long segmentTxId, long expectedEndTxId) throws IOException {
    int numFinalized = 0;
    for (int i = 0; i < cluster.getNumNodes(); i++) {
        File logDir = cluster.getCurrentDir(i, JID);
        EditLogFile elf = FileJournalManager.getLogFile(logDir, segmentTxId);
        if (elf == null) {
            continue;
        }
        if (!elf.isInProgress()) {
            numFinalized++;
            if (elf.getLastTxId() != expectedEndTxId) {
                fail("File " + elf + " finalized to wrong txid, expected " + expectedEndTxId);
            }
        }
    }
    if (numFinalized < cluster.getQuorumSize()) {
        fail("Did not find a quorum of finalized logs starting at " + segmentTxId);
    }
}
Also used : EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) File(java.io.File)
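
A hedged usage sketch for the helper above; qjm is assumed to be a QuorumJournalManager that had been writing to the cluster, and the txids are illustrative:

private void recoverAndCheck(QuorumJournalManager qjm, MiniJournalCluster cluster)
        throws IOException {
    // Finalize (or discard) whatever segment a crashed writer left in-progress.
    qjm.recoverUnfinalizedSegments();
    // The segment that began at txid 1 should now be finalized through
    // txid 3 on at least a quorum of JournalNodes.
    checkRecovery(cluster, 1, 3);
}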

Example 8 with EditLogFile

Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.

From class TestNNStorageRetentionManager, method runTest:

private void runTest(TestCaseDescription tc) throws IOException {
    StoragePurger mockPurger = Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
    ArgumentCaptor<FSImageFile> imagesPurgedCaptor = ArgumentCaptor.forClass(FSImageFile.class);
    ArgumentCaptor<EditLogFile> logsPurgedCaptor = ArgumentCaptor.forClass(EditLogFile.class);
    // Ask the manager to purge files we don't need any more
    new NNStorageRetentionManager(conf, tc.mockStorage(), tc.mockEditLog(mockPurger), mockPurger).purgeOldStorage(NameNodeFile.IMAGE);
    // Verify that it asked the purger to remove the correct files
    Mockito.verify(mockPurger, Mockito.atLeast(0)).purgeImage(imagesPurgedCaptor.capture());
    Mockito.verify(mockPurger, Mockito.atLeast(0)).purgeLog(logsPurgedCaptor.capture());
    // Check images
    Set<String> purgedPaths = Sets.newLinkedHashSet();
    for (FSImageFile purged : imagesPurgedCaptor.getAllValues()) {
        purgedPaths.add(fileToPath(purged.getFile()));
    }
    Assert.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)), Joiner.on(",").join(purgedPaths));
    // Check edit logs
    purgedPaths.clear();
    for (EditLogFile purged : logsPurgedCaptor.getAllValues()) {
        purgedPaths.add(fileToPath(purged.getFile()));
    }
    Assert.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)), Joiner.on(",").join(purgedPaths));
}
Also used : FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) StoragePurger(org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger)
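
The Mockito.atLeast(0) plus ArgumentCaptor combination is the interesting move here: the verification can never fail, so the captor simply records every call that did happen. Below is a self-contained sketch of the same trick, with a made-up Purger interface standing in for StoragePurger:

interface Purger {
    void purge(String path);
}

@Test
public void captorCollectsAllCalls() {
    Purger mock = Mockito.mock(Purger.class);
    mock.purge("/edits_1-10");
    mock.purge("/edits_11-20");

    // atLeast(0) passes even for zero invocations; capture() records the
    // argument of each invocation that actually occurred, in order.
    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    Mockito.verify(mock, Mockito.atLeast(0)).purge(captor.capture());
    Assert.assertEquals(Arrays.asList("/edits_1-10", "/edits_11-20"),
            captor.getAllValues());
}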

Example 9 with EditLogFile

Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.

From class GetJournalEditServlet, method doGet:

@Override
public void doGet(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException {
    FileInputStream editFileIn = null;
    try {
        final ServletContext context = getServletContext();
        final Configuration conf = (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
        final String journalId = request.getParameter(JOURNAL_ID_PARAM);
        QuorumJournalManager.checkJournalId(journalId);
        final JNStorage storage = JournalNodeHttpServer.getJournalFromContext(context, journalId).getStorage();
        // Check security
        if (!checkRequestorOrSendError(conf, request, response)) {
            return;
        }
        // Check that the namespace info is correct
        if (!checkStorageInfoOrSendError(storage, request, response)) {
            return;
        }
        long segmentTxId = ServletUtil.parseLongParam(request, SEGMENT_TXID_PARAM);
        FileJournalManager fjm = storage.getJournalManager();
        File editFile;
        synchronized (fjm) {
            // Synchronize on the FJM so that the file doesn't get finalized
            // out from underneath us while we're in the process of opening
            // it up.
            EditLogFile elf = fjm.getLogFile(segmentTxId);
            if (elf == null) {
                response.sendError(HttpServletResponse.SC_NOT_FOUND, "No edit log found starting at txid " + segmentTxId);
                return;
            }
            editFile = elf.getFile();
            ImageServlet.setVerificationHeadersForGet(response, editFile);
            ImageServlet.setFileNameHeaders(response, editFile);
            editFileIn = new FileInputStream(editFile);
        }
        DataTransferThrottler throttler = ImageServlet.getThrottler(conf);
        // send edits
        TransferFsImage.copyFileToStream(response.getOutputStream(), editFile, editFileIn, throttler);
    } catch (Throwable t) {
        String errMsg = "getedit failed. " + StringUtils.stringifyException(t);
        response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, errMsg);
        throw new IOException(errMsg);
    } finally {
        IOUtils.closeStream(editFileIn);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) ServletContext(javax.servlet.ServletContext) FileJournalManager(org.apache.hadoop.hdfs.server.namenode.FileJournalManager) DataTransferThrottler(org.apache.hadoop.hdfs.util.DataTransferThrottler) IOException(java.io.IOException) File(java.io.File) FileInputStream(java.io.FileInputStream)
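
On the wire this is a plain HTTP GET. Here is a hedged client-side sketch using only java.net: the /getJournal path and the jid/segmentTxId parameter names mirror the servlet's constants, but the host, journal id, and destination are made up (8480 is the default JournalNode HTTP port), and the storageInfo parameter that checkStorageInfoOrSendError validates is omitted:

static void fetchSegment(String jid, long segmentTxId, File dest) throws IOException {
    URL url = new URL("http://jn.example.com:8480/getJournal?jid=" + jid
            + "&segmentTxId=" + segmentTxId);
    try (InputStream in = url.openStream();
            OutputStream out = new FileOutputStream(dest)) {
        byte[] buf = new byte[64 * 1024];
        int n;
        // Stream the segment to disk; throttling happens on the servlet side.
        while ((n = in.read(buf)) != -1) {
            out.write(buf, 0, n);
        }
    }
}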

Example 10 with EditLogFile

Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.

From class Journal, method scanStorageForLatestEdits:

/**
   * Scan the local storage directory, and return the segment containing
   * the highest transaction.
   * @return the EditLogFile containing the highest transaction, or null
   * if no files exist.
   */
private synchronized EditLogFile scanStorageForLatestEdits() throws IOException {
    if (!fjm.getStorageDirectory().getCurrentDir().exists()) {
        return null;
    }
    LOG.info("Scanning storage " + fjm);
    List<EditLogFile> files = fjm.getLogFiles(0);
    while (!files.isEmpty()) {
        EditLogFile latestLog = files.remove(files.size() - 1);
        latestLog.scanLog(Long.MAX_VALUE, false);
        LOG.info("Latest log is " + latestLog);
        if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
            // the log contains no transactions
            LOG.warn("Latest log " + latestLog + " has no transactions. " + "moving it aside and looking for previous log");
            latestLog.moveAsideEmptyFile();
        } else {
            return latestLog;
        }
    }
    LOG.info("No files in " + fjm);
    return null;
}
Also used : EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile)
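
A hedged sketch of how a caller might act on the scan result; the branching below is illustrative rather than the Journal class's actual startup logic:

EditLogFile latest = scanStorageForLatestEdits();
if (latest == null) {
    // Fresh or empty storage directory: nothing to recover.
} else if (latest.isInProgress()) {
    // A writer died mid-segment; recovery must finalize or discard the
    // segment starting at latest.getFirstTxId().
} else {
    // Everything is finalized: the next segment starts after the last txid.
    long nextTxId = latest.getLastTxId() + 1;
}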

Aggregations

EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 12
File (java.io.File): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
IOException (java.io.IOException): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
Test (org.junit.Test): 3
RandomAccessFile (java.io.RandomAccessFile): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
Path (org.apache.hadoop.fs.Path): 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 2
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
FileInputStream (java.io.FileInputStream): 1
ServletContext (javax.servlet.ServletContext): 1
NewEpochResponseProto (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto): 1
SegmentStateProto (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto): 1
DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier): 1
FileJournalManager (org.apache.hadoop.hdfs.server.namenode.FileJournalManager): 1