Search in sources:

Example 1 with StorageState

use of org.apache.hadoop.hdfs.server.common.Storage.StorageState in project hadoop by apache.

In class FSImage, the method recoverStorageDirs:

/**
   * For each storage directory, performs recovery of incomplete transitions
   * (eg. upgrade, rollback, checkpoint) and inserts the directory's storage
   * state into the dataDirStates map.
   *
   * @param startOpt startup option the NameNode was launched with;
   *                 METADATAVERSION returns early after reading the first
   *                 directory's properties, and ROLLBACK skips the
   *                 read-and-verify-properties step
   * @param storage the NNStorage whose directories are analyzed and recovered
   * @param dataDirStates output of storage directory states
   * @return true if there is at least one valid formatted storage directory
   * @throws IOException if a configured directory is missing or inaccessible,
   *                     inconsistent with its peers, or an IMPORT is attempted
   *                     while a formatted image directory already exists
   */
public static boolean recoverStorageDirs(StartupOption startOpt, NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
    boolean isFormatted = false;
    // Recovery (sd.doRecover below) may mutate each directory's contents.
    for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        StorageState curState;
        if (startOpt == StartupOption.METADATAVERSION) {
            /* All we need is the layout version: read the first directory's
               properties and stop; dataDirStates is left unpopulated. */
            storage.readProperties(sd);
            return true;
        }
        try {
            curState = sd.analyzeStorage(startOpt, storage);
            // sd is now locked but not opened
            switch(curState) {
                case NON_EXISTENT:
                    // name-node fails if any of the configured storage dirs are missing
                    throw new InconsistentFSStateException(sd.getRoot(), "storage directory does not exist or is not accessible.");
                case NOT_FORMATTED:
                    break;
                case NORMAL:
                    break;
                default:
                    // some incomplete transition was left behind; recovery is possible
                    sd.doRecover(curState);
            }
            if (curState != StorageState.NOT_FORMATTED && startOpt != StartupOption.ROLLBACK) {
                // read and verify consistency with other directories
                storage.readProperties(sd, startOpt);
                isFormatted = true;
            }
            if (startOpt == StartupOption.IMPORT && isFormatted)
                // import of a checkpoint is allowed only into empty image directories
                throw new IOException("Cannot import image from a checkpoint. " + " NameNode already contains an image in " + sd.getRoot());
        } catch (IOException ioe) {
            // release the lock taken by analyzeStorage before propagating
            sd.unlock();
            throw ioe;
        }
        dataDirStates.put(sd, curState);
    }
    return isFormatted;
}
Also used : StorageState(org.apache.hadoop.hdfs.server.common.Storage.StorageState) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)

Example 2 with StorageState

use of org.apache.hadoop.hdfs.server.common.Storage.StorageState in project hadoop by apache.

In class FSImage, the method recoverTransitionRead:

/**
   * Analyze storage directories.
   * Recover from previous transitions if required.
   * Perform fs state transition if necessary depending on the namespace info.
   * Read storage info.
   *
   * @param startOpt startup option the NameNode was launched with
   * @param target the namesystem that the image/edits are loaded into
   * @param recovery metadata-recovery context, forwarded to loadFSImage
   * @throws IOException if directories are inaccessible, storage is not
   *         formatted, or the on-disk layout version is incompatible
   * @return true if the image needs to be saved or false otherwise
   */
boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, MetaRecoveryContext recovery) throws IOException {
    assert startOpt != StartupOption.FORMAT : "NameNode formatting should be performed before reading the image";
    Collection<URI> imageDirs = storage.getImageDirectories();
    Collection<URI> editsDirs = editLog.getEditURIs();
    // fail fast when no image or no edits dirs exist (IMPORT is exempt)
    if ((imageDirs.size() == 0 || editsDirs.size() == 0) && startOpt != StartupOption.IMPORT)
        throw new IOException("All specified directories are not accessible or do not exist.");
    // 1. For each data directory calculate its state and
    // check whether all is consistent before transitioning.
    Map<StorageDirectory, StorageState> dataDirStates = new HashMap<StorageDirectory, StorageState>();
    boolean isFormatted = recoverStorageDirs(startOpt, storage, dataDirStates);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Data dir states:\n  " + Joiner.on("\n  ").withKeyValueSeparator(": ").join(dataDirStates));
    }
    // ROLLBACK/IMPORT may legitimately start from unformatted storage
    if (!isFormatted && startOpt != StartupOption.ROLLBACK && startOpt != StartupOption.IMPORT) {
        throw new IOException("NameNode is not formatted.");
    }
    int layoutVersion = storage.getLayoutVersion();
    if (startOpt == StartupOption.METADATAVERSION) {
        // METADATAVERSION only reports versions; nothing is loaded or saved
        System.out.println("HDFS Image Version: " + layoutVersion);
        System.out.println("Software format version: " + HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
        return false;
    }
    if (layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
        NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
    }
    // an on-disk layout that differs from the software's requires an explicit
    // upgrade (or an already-started rolling upgrade) before a normal start
    if (startOpt != StartupOption.UPGRADE && startOpt != StartupOption.UPGRADEONLY && !RollingUpgradeStartupOption.STARTED.matches(startOpt) && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION && layoutVersion != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) {
        throw new IOException("\nFile system image contains an old layout version " + storage.getLayoutVersion() + ".\nAn upgrade to version " + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + " is required.\n" + "Please restart NameNode with the \"" + RollingUpgradeStartupOption.STARTED.getOptionString() + "\" option if a rolling upgrade is already started;" + " or restart NameNode with the \"" + StartupOption.UPGRADE.getName() + "\" option to start" + " a new upgrade.");
    }
    storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
    // 2. Format unformatted dirs.
    for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        StorageState curState = dataDirStates.get(sd);
        switch(curState) {
            case NON_EXISTENT:
                // recoverStorageDirs throws on NON_EXISTENT, so it cannot reach here
                throw new IOException(StorageState.NON_EXISTENT + " state cannot be here");
            case NOT_FORMATTED:
                LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
                LOG.info("Formatting ...");
                // create empty current dir
                sd.clearDirectory();
                break;
            default:
                break;
        }
    }
    // 3. Do transitions
    switch(startOpt) {
        case UPGRADE:
        case UPGRADEONLY:
            doUpgrade(target);
            // upgrade saved image already
            return false;
        case IMPORT:
            doImportCheckpoint(target);
            // import checkpoint saved image already
            return false;
        case ROLLBACK:
            throw new AssertionError("Rollback is now a standalone command, " + "NameNode should not be starting with this option.");
        case REGULAR:
        default:
    }
    return loadFSImage(target, startOpt, recovery);
}
Also used : HashMap(java.util.HashMap) StorageState(org.apache.hadoop.hdfs.server.common.Storage.StorageState) IOException(java.io.IOException) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) URI(java.net.URI)

Example 3 with StorageState

use of org.apache.hadoop.hdfs.server.common.Storage.StorageState in project hadoop by apache.

In class BootstrapStandby, the method doPreUpgrade:

/**
   * Called when bootstrapStandby is used for an HA upgrade. The standby
   * NameNode must also create a previous directory so that, on its next
   * start, it recognizes that the cluster is in the upgrade state. This
   * method renames each directory's old current directory to previous.tmp.
   */
private boolean doPreUpgrade(NNStorage storage, NamespaceInfo nsInfo) throws IOException {
    Map<StorageDirectory, StorageState> states = new HashMap<>();
    boolean hasFormattedDir = false;
    try {
        hasFormattedDir = FSImage.recoverStorageDirs(StartupOption.UPGRADE, storage, states);
        // recoverStorageDirs reports true if at least one directory is
        // formatted; treat storage as unformatted when ANY directory is not.
        if (states.values().contains(StorageState.NOT_FORMATTED)) {
            hasFormattedDir = false;
            System.err.println("The original storage directory is not formatted.");
        }
    } catch (InconsistentFSStateException e) {
        // a bad on-disk state is tolerated here; we may reformat below
        LOG.warn("The storage directory is in an inconsistent state", e);
    } finally {
        storage.unlockAll();
    }
    // Nothing usable found: format fresh storage; give up if that fails.
    if (!hasFormattedDir && !format(storage, nsInfo)) {
        return false;
    }
    // make sure there is no previous directory
    FSImage.checkUpgrade(storage);
    // Do preUpgrade for each directory: move current aside as previous.tmp.
    Iterator<StorageDirectory> dirs = storage.dirIterator(false);
    while (dirs.hasNext()) {
        StorageDirectory dir = dirs.next();
        try {
            NNUpgradeUtil.renameCurToTmp(dir);
        } catch (IOException e) {
            LOG.error("Failed to move aside pre-upgrade storage " + "in image directory " + dir.getRoot(), e);
            throw e;
        }
    }
    storage.setStorageInfo(nsInfo);
    storage.setBlockPoolID(nsInfo.getBlockPoolID());
    return true;
}
Also used : HashMap(java.util.HashMap) StorageState(org.apache.hadoop.hdfs.server.common.Storage.StorageState) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)

Example 4 with StorageState

use of org.apache.hadoop.hdfs.server.common.Storage.StorageState in project hadoop by apache.

In class BackupImage, the method recoverCreateRead:

/**
   * Analyze backup storage directories for consistency.<br>
   * Recover from incomplete checkpoints if required.<br>
   * Read VERSION and fstime files if exist.<br>
   * Do not load image or edits.
   *
   * @throws IOException if the node should shutdown.
   */
void recoverCreateRead() throws IOException {
    Iterator<StorageDirectory> dirs = storage.dirIterator();
    while (dirs.hasNext()) {
        StorageDirectory dir = dirs.next();
        StorageState state;
        try {
            // analyzeStorage leaves the directory locked but not opened
            state = dir.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
            if (state == StorageState.NON_EXISTENT) {
                // fail if any of the configured storage dirs are inaccessible
                throw new InconsistentFSStateException(dir.getRoot(), "checkpoint directory does not exist or is not accessible.");
            } else if (state == StorageState.NOT_FORMATTED) {
                // for the backup node, all directories may start out unformatted
                LOG.info("Storage directory " + dir.getRoot() + " is not formatted.");
                LOG.info("Formatting ...");
                // create an empty current directory
                dir.clearDirectory();
            } else if (state != StorageState.NORMAL) {
                // any other state means an incomplete transition; recover it
                dir.doRecover(state);
            }
            if (state != StorageState.NOT_FORMATTED) {
                // read and verify consistency with other directories
                storage.readProperties(dir);
            }
        } catch (IOException ioe) {
            // release the lock taken by analyzeStorage before propagating
            dir.unlock();
            throw ioe;
        }
    }
}
Also used : StorageState(org.apache.hadoop.hdfs.server.common.Storage.StorageState) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)

Aggregations

IOException (java.io.IOException)4 StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)4 StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState)4 InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)3 HashMap (java.util.HashMap)2 URI (java.net.URI)1