
Example 1 with FSImageFile

Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.

In class NNStorageRetentionManager, method purgeCheckpoinsAfter:

void purgeCheckpoinsAfter(NameNodeFile nnf, long fromTxId) throws IOException {
    FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector(EnumSet.of(nnf));
    storage.inspectStorageDirs(inspector);
    // Purge every discovered checkpoint image newer than fromTxId.
    for (FSImageFile image : inspector.getFoundImages()) {
        if (image.getCheckpointTxId() > fromTxId) {
            purger.purgeImage(image);
        }
    }
}
Also used: FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile)
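
A minimal, self-contained sketch of the same retention rule, using a hypothetical FoundImage record and a purge callback in place of the Hadoop types:

import java.util.List;
import java.util.function.Consumer;

// Hypothetical stand-in for FSImageFile; illustration only.
record FoundImage(long checkpointTxId, String path) {}

class PurgeAfterSketch {

    // Mirror of the loop above: purge every image newer than fromTxId.
    static void purgeAfter(List<FoundImage> found, long fromTxId, Consumer<FoundImage> purger) {
        for (FoundImage image : found) {
            if (image.checkpointTxId() > fromTxId) {
                purger.accept(image);
            }
        }
    }

    public static void main(String[] args) {
        List<FoundImage> found = List.of(
                new FoundImage(100, "fsimage_100"),
                new FoundImage(200, "fsimage_200"));
        purgeAfter(found, 150, img -> System.out.println("purging " + img.path()));
        // prints: purging fsimage_200
    }
}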

Example 2 with FSImageFile

Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.

In class TestNNStorageRetentionManager, method runTest:

private void runTest(TestCaseDescription tc) throws IOException {
    StoragePurger mockPurger = Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
    ArgumentCaptor<FSImageFile> imagesPurgedCaptor = ArgumentCaptor.forClass(FSImageFile.class);
    ArgumentCaptor<EditLogFile> logsPurgedCaptor = ArgumentCaptor.forClass(EditLogFile.class);
    // Ask the manager to purge files we don't need any more
    new NNStorageRetentionManager(conf, tc.mockStorage(), tc.mockEditLog(mockPurger), mockPurger).purgeOldStorage(NameNodeFile.IMAGE);
    // Verify that it asked the purger to remove the correct files
    Mockito.verify(mockPurger, Mockito.atLeast(0)).purgeImage(imagesPurgedCaptor.capture());
    Mockito.verify(mockPurger, Mockito.atLeast(0)).purgeLog(logsPurgedCaptor.capture());
    // Check images
    Set<String> purgedPaths = Sets.newLinkedHashSet();
    for (FSImageFile purged : imagesPurgedCaptor.getAllValues()) {
        purgedPaths.add(fileToPath(purged.getFile()));
    }
    Assert.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)), Joiner.on(",").join(purgedPaths));
    // Check edit logs
    purgedPaths.clear();
    for (EditLogFile purged : logsPurgedCaptor.getAllValues()) {
        purgedPaths.add(fileToPath(purged.getFile()));
    }
    Assert.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)), Joiner.on(",").join(purgedPaths));
}
Also used: FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) StoragePurger(org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger)
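
The capture-then-assert pattern in runTest is plain Mockito; here is a standalone sketch with a hypothetical Purger interface standing in for StoragePurger:

import java.util.List;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

class CaptorSketch {

    // Hypothetical collaborator; not the Hadoop StoragePurger.
    interface Purger {
        void purge(String path);
    }

    static List<String> recordedPurges() {
        Purger mock = Mockito.mock(Purger.class);
        mock.purge("/current/fsimage_1");
        mock.purge("/current/fsimage_2");
        // atLeast(0) lets the verification pass even if nothing was purged,
        // while the captor still collects every argument that was passed.
        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        Mockito.verify(mock, Mockito.atLeast(0)).purge(captor.capture());
        return captor.getAllValues();
    }
}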

Example 3 with FSImageFile

Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.

In class TestFSImageStorageInspector, method testCurrentStorageInspector:

/**
   * Simple test with an image, finalized edits, and in-progress edits.
   */
@Test
public void testCurrentStorageInspector() throws IOException {
    FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector();
    StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
            NameNodeDirType.IMAGE_AND_EDITS,
            false,
            "/foo/current/" + getImageFileName(123),
            "/foo/current/" + getFinalizedEditsFileName(123, 456),
            "/foo/current/" + getImageFileName(456),
            "/foo/current/" + getInProgressEditsFileName(457));
    inspector.inspectDirectory(mockDir);
    assertEquals(2, inspector.foundImages.size());
    FSImageFile latestImage = inspector.getLatestImages().get(0);
    assertEquals(456, latestImage.txId);
    assertSame(mockDir, latestImage.sd);
    assertTrue(inspector.isUpgradeFinalized());
    assertEquals(new File("/foo/current/" + getImageFileName(456)), latestImage.getFile());
}
Also used: FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File) Test(org.junit.Test)
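
The getImageFileName, getFinalizedEditsFileName, and getInProgressEditsFileName helpers build NNStorage-style file names. Assuming the usual 19-digit zero-padded txid scheme (an assumption; the sketch below uses plain string formatting, not the Hadoop helpers):

class LayoutNameSketch {

    // Assumed naming scheme: fsimage_<txid>, edits_<start>-<end>,
    // edits_inprogress_<start>, with each txid zero-padded to 19 digits.
    static String imageName(long txId) {
        return String.format("fsimage_%019d", txId);
    }

    static String finalizedEditsName(long startTxId, long endTxId) {
        return String.format("edits_%019d-%019d", startTxId, endTxId);
    }

    static String inProgressEditsName(long startTxId) {
        return String.format("edits_inprogress_%019d", startTxId);
    }

    public static void main(String[] args) {
        System.out.println(imageName(456)); // fsimage_0000000000000000456
    }
}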

Example 4 with FSImageFile

Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.

In class FSImageTestUtil, method findNewestImageFile:

/**
   * @return the fsimage file with the most recent transaction ID in the
   * given 'current/' directory.
   */
public static File findNewestImageFile(String currentDirPath) throws IOException {
    StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(new File(currentDirPath), NameNodeDirType.IMAGE);
    FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector();
    inspector.inspectDirectory(sd);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    // Null when the directory contains no fsimage at all.
    return (latestImages.isEmpty()) ? null : latestImages.get(0).getFile();
}
Also used: FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) File(java.io.File)
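
A short usage sketch; the directory path is illustrative:

import java.io.File;
import java.io.IOException;

class FindNewestDemo {

    public static void main(String[] args) throws IOException {
        // Hypothetical current/ directory of a NameNode storage dir.
        File newest = FSImageTestUtil.findNewestImageFile("/data/dfs/name/current");
        System.out.println(newest == null
                ? "no fsimage found"
                : "newest image: " + newest.getAbsolutePath());
    }
}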

Example 5 with FSImageFile

Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.

In class FSImage, method loadFSImage:

/**
   * Choose latest image from one of the directories,
   * load it and merge with the edits.
   * 
   * Saving and loading fsimage should never trigger symlink resolution. 
   * The paths that are persisted do not have *intermediate* symlinks 
   * because intermediate symlinks are resolved at the time files, 
   * directories, and symlinks are created. All paths accessed while 
   * loading or saving fsimage should therefore only see symlinks as 
   * the final path component, and the functions called below do not
   * resolve symlinks that are the final path component.
   *
   * @return whether the image should be saved
   * @throws IOException
   */
private boolean loadFSImage(FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
    final boolean rollingRollback = RollingUpgradeStartupOption.ROLLBACK.matches(startOpt);
    final EnumSet<NameNodeFile> nnfs;
    if (rollingRollback) {
        // if it is rollback of rolling upgrade, only load from the rollback image
        nnfs = EnumSet.of(NameNodeFile.IMAGE_ROLLBACK);
    } else {
        // otherwise we can load from both IMAGE and IMAGE_ROLLBACK
        nnfs = EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK);
    }
    final FSImageStorageInspector inspector = storage.readAndInspectDirs(nnfs, startOpt);
    isUpgradeFinalized = inspector.isUpgradeFinalized();
    List<FSImageFile> imageFiles = inspector.getLatestImages();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.LOADING_FSIMAGE);
    File phaseFile = imageFiles.get(0).getFile();
    prog.setFile(Phase.LOADING_FSIMAGE, phaseFile.getAbsolutePath());
    prog.setSize(Phase.LOADING_FSIMAGE, phaseFile.length());
    boolean needToSave = inspector.needToSave();
    Iterable<EditLogInputStream> editStreams = null;
    initEditLog(startOpt);
    if (NameNodeLayoutVersion.supports(LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
        // If we're open for write, we're either non-HA or we're the active NN, so
        // we better be able to load all the edits. If we're the standby NN, it's
        // OK to not be able to read all of edits right now.
        // In the meanwhile, for HA upgrade, we will still write editlog thus need
        // this toAtLeastTxId to be set to the max-seen txid
        // For rollback in rolling upgrade, we need to set the toAtLeastTxId to
        // the txid right before the upgrade marker.  
        long toAtLeastTxId = editLog.isOpenForWrite() ? inspector.getMaxSeenTxId() : 0;
        if (rollingRollback) {
            // note that the first image in imageFiles is the special checkpoint
            // for the rolling upgrade
            toAtLeastTxId = imageFiles.get(0).getCheckpointTxId() + 2;
        }
        editStreams = editLog.selectInputStreams(imageFiles.get(0).getCheckpointTxId() + 1, toAtLeastTxId, recovery, false);
    } else {
        editStreams = FSImagePreTransactionalStorageInspector.getEditLogStreams(storage);
    }
    int maxOpSize = conf.getInt(DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT);
    for (EditLogInputStream elis : editStreams) {
        elis.setMaxOpSize(maxOpSize);
    }
    for (EditLogInputStream l : editStreams) {
        LOG.debug("Planning to load edit log stream: " + l);
    }
    if (!editStreams.iterator().hasNext()) {
        LOG.info("No edit log streams selected.");
    }
    FSImageFile imageFile = null;
    for (int i = 0; i < imageFiles.size(); i++) {
        try {
            imageFile = imageFiles.get(i);
            loadFSImageFile(target, recovery, imageFile, startOpt);
            break;
        } catch (IllegalReservedPathException ie) {
            throw new IOException("Failed to load image from " + imageFile, ie);
        } catch (Exception e) {
            LOG.error("Failed to load image from " + imageFile, e);
            target.clear();
            imageFile = null;
        }
    }
    // Failed to load any images, error out
    if (imageFile == null) {
        FSEditLog.closeAllStreams(editStreams);
        throw new IOException("Failed to load FSImage file, see error(s) " + "above for more info.");
    }
    prog.endPhase(Phase.LOADING_FSIMAGE);
    if (!rollingRollback) {
        long txnsAdvanced = loadEdits(editStreams, target, startOpt, recovery);
        needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(), txnsAdvanced);
    } else {
        // Trigger the rollback for rolling upgrade. Here lastAppliedTxId equals
        // to the last txid in rollback fsimage.
        rollingRollback(lastAppliedTxId + 1, imageFiles.get(0).getCheckpointTxId());
        needToSave = false;
    }
    editLog.setNextTxId(lastAppliedTxId + 1);
    return needToSave;
}
Also used: FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) StartupProgress(org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)
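
The txid arithmetic is the subtle part of loadFSImage; below is a minimal sketch of just the replay-range selection, with hypothetical names (not the Hadoop API):

class ReplayRangeSketch {

    // Hypothetical holder for the edit-log replay bounds.
    record Range(long fromTxId, long toAtLeastTxId) {}

    static Range replayRange(long checkpointTxId, long maxSeenTxId,
                             boolean openForWrite, boolean rollingRollback) {
        // Replay always starts just after the checkpoint covered by the image.
        long from = checkpointTxId + 1;
        // A writable (active) NN must reach the max-seen txid; a standby may lag.
        long to = openForWrite ? maxSeenTxId : 0;
        if (rollingRollback) {
            // Mirrors loadFSImage: the rollback target is the special
            // checkpoint's txid + 2.
            to = checkpointTxId + 2;
        }
        return new Range(from, to);
    }
}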

Aggregations

FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 9
File (java.io.File): 5
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 3
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 3
IOException (java.io.IOException): 2
RandomAccessFile (java.io.RandomAccessFile): 2
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 2
FileNotFoundException (java.io.FileNotFoundException): 1
ArrayList (java.util.ArrayList): 1
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 1
StoragePurger (org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger): 1
StartupProgress (org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress): 1
Test (org.junit.Test): 1