
Example 76 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class FSImage method doRollback.

void doRollback(FSNamesystem fsns) throws IOException {
    // Rollback is allowed only if there is
    // a previous fs state in at least one of the storage directories.
    // Directories that don't have a previous state are not rolled back.
    boolean canRollback = false;
    FSImage prevState = new FSImage(conf);
    try {
        prevState.getStorage().layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
        for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
            StorageDirectory sd = it.next();
            if (!NNUpgradeUtil.canRollBack(sd, storage, prevState.getStorage(), HdfsServerConstants.NAMENODE_LAYOUT_VERSION)) {
                continue;
            }
            LOG.info("Can perform rollback for " + sd);
            canRollback = true;
        }
        if (fsns.isHaEnabled()) {
            // If HA is enabled, check if the shared log can be rolled back as well.
            editLog.initJournalsForWrite();
            boolean canRollBackSharedEditLog = editLog.canRollBackSharedLog(prevState.getStorage(), HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
            if (canRollBackSharedEditLog) {
                LOG.info("Can perform rollback for shared edit log.");
                canRollback = true;
            }
        }
        if (!canRollback)
            throw new IOException("Cannot rollback. None of the storage " + "directories contain previous fs state.");
        // Do rollback for each directory containing previous state
        for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
            StorageDirectory sd = it.next();
            LOG.info("Rolling back storage directory " + sd.getRoot() + ".\n   new LV = " + prevState.getStorage().getLayoutVersion() + "; new CTime = " + prevState.getStorage().getCTime());
            NNUpgradeUtil.doRollBack(sd);
        }
        if (fsns.isHaEnabled()) {
            // If HA is enabled, try to roll back the shared log as well.
            editLog.doRollback();
        }
        isUpgradeFinalized = true;
    } finally {
        prevState.close();
    }
}
Also used: StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), IOException (java.io.IOException)
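
The rollback check above relies on each StorageDirectory keeping its pre-upgrade state in a "previous" subdirectory alongside "current". The following is a minimal sketch, not taken from the Hadoop sources, of how that on-disk layout can be inspected through the StorageDirectory accessors; printStorageLayout is a hypothetical helper, and it assumes an already-initialized NNStorage instance and the same NameNode-internal package context as the snippets on this page.

import java.io.File;
import java.util.Iterator;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;

// Hypothetical helper (sketch only): walks every storage directory and prints
// the parts of the layout that doRollback() and finalizeUpgrade() operate on.
void printStorageLayout(NNStorage storage) {
    for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        // Top-level directory backing this storage location.
        File root = sd.getRoot();
        // "current" holds the live state; "previous" holds the pre-upgrade
        // state that doRollback() would restore, if it exists.
        File current = sd.getCurrentDir();
        File previous = sd.getPreviousDir();
        // VERSION properties file (layoutVersion, cTime, ...) inside "current".
        File version = sd.getVersionFile();
        System.out.println(root + ": current=" + current + ", VERSION=" + version + ", previous state present=" + previous.exists());
    }
}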

Example 77 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class FSImage method renameCheckpoint.

/**
   * Rename all the fsimage files of the specified NameNodeFile type. The
   * associated checksum files will also be renamed.
   */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf) throws IOException {
    ArrayList<StorageDirectory> al = null;
    FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
    storage.inspectStorageDirs(inspector);
    for (FSImageFile image : inspector.getFoundImages()) {
        try {
            renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
        } catch (IOException ioe) {
            LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
            if (al == null) {
                al = Lists.newArrayList();
            }
            al.add(image.sd);
        }
    }
    if (al != null) {
        storage.reportErrorsOnDirectories(al);
    }
}
Also used: FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), IOException (java.io.IOException)
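
Note the error-handling pattern used here and again in Example 80: directories where the rename fails are collected into a list and reported in a single storage.reportErrorsOnDirectories(al) call after the loop, so one failing StorageDirectory does not stop the checkpoint rename in the remaining directories.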

Example 78 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class FSImage method finalizeUpgrade.

void finalizeUpgrade(boolean finalizeEditLog) throws IOException {
    LOG.info("Finalizing upgrade for local dirs. " + (storage.getLayoutVersion() == 0 ? "" : "\n   cur LV = " + storage.getLayoutVersion() + "; cur CTime = " + storage.getCTime()));
    for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        NNUpgradeUtil.doFinalize(sd);
    }
    if (finalizeEditLog) {
        // We only do this when HA is enabled and we're active. In any other
        // case the NN will have already upgraded the edits directories,
        // because they're local.
        editLog.doFinalizeOfSharedLog();
    }
    isUpgradeFinalized = true;
}
Also used: StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)
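
Finalizing is the counterpart of the rollback in Example 76: for each StorageDirectory, NNUpgradeUtil.doFinalize(sd) discards the saved previous state, so once finalizeUpgrade completes a rollback to the pre-upgrade layout is no longer possible.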

Example 79 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class FSImage method loadFSImageFile.

void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery, FSImageFile imageFile, StartupOption startupOption) throws IOException {
    LOG.info("Planning to load image: " + imageFile);
    StorageDirectory sdForProperties = imageFile.sd;
    storage.readProperties(sdForProperties, startupOption);
    if (NameNodeLayoutVersion.supports(LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
        // For txid-based layout, we should have a .md5 file
        // next to the image file
        boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK.matches(startupOption);
        loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
    } else if (NameNodeLayoutVersion.supports(LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
        // In 0.22, we have the checksum stored in the VERSION file.
        String md5 = storage.getDeprecatedProperty(NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
        if (md5 == null) {
            throw new InconsistentFSStateException(sdForProperties.getRoot(), "Message digest property " + NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY + " not set for storage directory " + sdForProperties.getRoot());
        }
        loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery, false);
    } else {
        // We don't have any record of the md5sum
        loadFSImage(imageFile.getFile(), null, target, recovery, false);
    }
}
Also used: MD5Hash (org.apache.hadoop.io.MD5Hash), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)
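
The three branches reflect how the image checksum has been stored over time: txid-based layouts keep a .md5 file next to each image, the 0.22-era layout records the digest in the VERSION file (the deprecated property read above), and older layouts carry no recorded checksum, so the image is loaded without an expected digest.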

Example 80 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class FSImage method deleteCancelledCheckpoint.

/**
   * Deletes the checkpoint file in every storage directory,
   * since the checkpoint was cancelled.
   */
private void deleteCancelledCheckpoint(long txid) throws IOException {
    ArrayList<StorageDirectory> al = Lists.newArrayList();
    for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
        File ckpt = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
        if (ckpt.exists() && !ckpt.delete()) {
            LOG.warn("Unable to delete cancelled checkpoint in " + sd);
            al.add(sd);
        }
    }
    storage.reportErrorsOnDirectories(al);
}
Also used: StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile), File (java.io.File)
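
The IMAGE_NEW files removed here are the in-progress checkpoint images; finalized images use the plain IMAGE type. Below is a small fragment, a sketch only, assuming the same NameNode-internal package context as the code above plus a valid image StorageDirectory sd and a checkpoint transaction id txid; it just shows how both file names resolve inside one directory.

// Sketch only: sd and txid are assumed to be supplied by surrounding code.
// The in-progress checkpoint image that deleteCancelledCheckpoint() removes.
File inProgress = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
// The finalized image name for the same transaction id.
File finalized = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, txid);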

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 83
File (java.io.File): 59
Test (org.junit.Test): 45
RandomAccessFile (java.io.RandomAccessFile): 29
IOException (java.io.IOException): 24
Configuration (org.apache.hadoop.conf.Configuration): 22
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 21
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 20
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 19
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 15
URI (java.net.URI): 11
FileSystem (org.apache.hadoop.fs.FileSystem): 11
Path (org.apache.hadoop.fs.Path): 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 7
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile): 6
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 5
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec): 5
ArrayList (java.util.ArrayList): 4
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 4