
Example 1 with InconsistentFSStateException

Use of org.apache.hadoop.hdfs.server.common.InconsistentFSStateException in project hadoop by apache.

In the class TestStartup, the method testNNFailToStartOnReadOnlyNNDir:

@Test(timeout = 30000)
public void testNNFailToStartOnReadOnlyNNDir() throws Exception {
    /* set NN dir */
    final String nnDirStr = Paths.get(hdfsDir.toString(), GenericTestUtils.getMethodName(), "name").toString();
    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nnDirStr);
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).manageNameDfsDirs(false).build()) {
        cluster.waitActive();
        /* get and verify NN dir */
        final Collection<URI> nnDirs = FSNamesystem.getNamespaceDirs(config);
        assertNotNull(nnDirs);
        assertTrue(nnDirs.iterator().hasNext());
        assertEquals("NN dir should be created after NN startup.", nnDirStr, nnDirs.iterator().next().getPath());
        final File nnDir = new File(nnDirStr);
        assertTrue(nnDir.exists());
        assertTrue(nnDir.isDirectory());
        try {
            /* set read only */
            assertTrue("Setting NN dir read only should succeed.", nnDir.setReadOnly());
            cluster.restartNameNodes();
            fail("Restarting NN should fail on read only NN dir.");
        } catch (InconsistentFSStateException e) {
            assertThat(e.toString(), is(allOf(
                    containsString("InconsistentFSStateException"),
                    containsString(nnDirStr),
                    containsString("in an inconsistent state"),
                    containsString("storage directory does not exist or is not accessible."))));
        } finally {
            /* set back to writable in order to clean it */
            assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true));
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) URI(java.net.URI) Util.fileAsURI(org.apache.hadoop.hdfs.server.common.Util.fileAsURI) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException) Test(org.junit.Test)
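
The message fragments asserted in the catch block come from the exception type itself: InconsistentFSStateException embeds the offending directory and the phrase "in an inconsistent state" in its message, as the containsString matchers above confirm. A minimal, standalone sketch of raising and inspecting it (the directory path is hypothetical):

import java.io.File;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;

public class InconsistentStateMessageSketch {
    public static void main(String[] args) {
        // hypothetical NN dir; any File works for this message demo
        File nnDir = new File("/tmp/dfs/name");
        try {
            // same constructor and description used in Example 3 below
            throw new InconsistentFSStateException(nnDir,
                    "storage directory does not exist or is not accessible.");
        } catch (InconsistentFSStateException e) {
            // the message names the directory and says it is
            // "in an inconsistent state", which is what the test matches on
            System.out.println(e.getMessage());
        }
    }
}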

Example 2 with InconsistentFSStateException

Use of org.apache.hadoop.hdfs.server.common.InconsistentFSStateException in project hadoop by apache.

In the class BootstrapStandby, the method doPreUpgrade:

/**
   * This is called when using bootstrapStandby for HA upgrade. The SBN should
   * also create previous directory so that later when it starts, it understands
   * that the cluster is in the upgrade state. This function renames the old
   * current directory to previous.tmp.
   */
private boolean doPreUpgrade(NNStorage storage, NamespaceInfo nsInfo) throws IOException {
    boolean isFormatted = false;
    Map<StorageDirectory, StorageState> dataDirStates = new HashMap<>();
    try {
        isFormatted = FSImage.recoverStorageDirs(StartupOption.UPGRADE, storage, dataDirStates);
        if (dataDirStates.values().contains(StorageState.NOT_FORMATTED)) {
            // recoverStorageDirs returns true if any one directory is formatted,
            // so clear the flag when some directory is still NOT_FORMATTED
            isFormatted = false;
            System.err.println("The original storage directory is not formatted.");
        }
    } catch (InconsistentFSStateException e) {
        // the storage is in a bad state; log it and fall through so the
        // directory can be formatted below
        LOG.warn("The storage directory is in an inconsistent state", e);
    } finally {
        storage.unlockAll();
    }
    // "-bootstrapStandby", we should still be fine.
    if (!isFormatted && !format(storage, nsInfo)) {
        return false;
    }
    // make sure there is no previous directory
    FSImage.checkUpgrade(storage);
    // Do preUpgrade for each directory
    for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        try {
            NNUpgradeUtil.renameCurToTmp(sd);
        } catch (IOException e) {
            LOG.error("Failed to move aside pre-upgrade storage " + "in image directory " + sd.getRoot(), e);
            throw e;
        }
    }
    storage.setStorageInfo(nsInfo);
    storage.setBlockPoolID(nsInfo.getBlockPoolID());
    return true;
}
Also used : HashMap(java.util.HashMap) StorageState(org.apache.hadoop.hdfs.server.common.Storage.StorageState) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)
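
The interesting part of this method is the recovery policy around the exception: an inconsistent directory is treated like an unformatted one, so control falls through to the format step. A condensed sketch of that policy, with recover() and format() as hypothetical stand-ins for FSImage.recoverStorageDirs(...) and format(storage, nsInfo):

import java.io.IOException;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;

class PreUpgradeRecoverySketch {
    boolean prepareStorage() throws IOException {
        boolean isFormatted = false;
        try {
            isFormatted = recover(); // stand-in for FSImage.recoverStorageDirs(...)
        } catch (InconsistentFSStateException e) {
            // a bad directory is not fatal here: leaving isFormatted false
            // routes us into the format step, mirroring doPreUpgrade above
        }
        // nothing usable recovered: reformat, as bootstrapStandby may safely do
        return isFormatted || format(); // stand-in for format(storage, nsInfo)
    }

    private boolean recover() throws IOException { return false; }
    private boolean format() { return true; }
}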

Example 3 with InconsistentFSStateException

Use of org.apache.hadoop.hdfs.server.common.InconsistentFSStateException in project hadoop by apache.

In the class FSImage, the method recoverStorageDirs:

/**
   * For each storage directory, performs recovery of incomplete transitions
   * (eg. upgrade, rollback, checkpoint) and inserts the directory's storage
   * state into the dataDirStates map.
   * @param dataDirStates output of storage directory states
   * @return true if there is at least one valid formatted storage directory
   */
public static boolean recoverStorageDirs(StartupOption startOpt, NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
    boolean isFormatted = false;
    // This loop needs to cover all storage dirs, even shared dirs, to make
    // sure we properly examine their state, but we take care not to
    // mutate the shared dir below in the actual loop.
    for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        StorageState curState;
        if (startOpt == StartupOption.METADATAVERSION) {
            /* All we need is the layout version. */
            storage.readProperties(sd);
            return true;
        }
        try {
            curState = sd.analyzeStorage(startOpt, storage);
            // sd is locked but not opened
            switch(curState) {
                case NON_EXISTENT:
                    // name-node fails if any of the configured storage dirs are missing
                    throw new InconsistentFSStateException(sd.getRoot(), "storage directory does not exist or is not accessible.");
                case NOT_FORMATTED:
                    break;
                case NORMAL:
                    break;
                default:
                    // recovery is possible
                    sd.doRecover(curState);
            }
            if (curState != StorageState.NOT_FORMATTED && startOpt != StartupOption.ROLLBACK) {
                // read and verify consistency with other directories
                storage.readProperties(sd, startOpt);
                isFormatted = true;
            }
            if (startOpt == StartupOption.IMPORT && isFormatted) {
                // import of a checkpoint is allowed only into empty image directories
                throw new IOException("Cannot import image from a checkpoint. "
                        + "NameNode already contains an image in " + sd.getRoot());
            }
        } catch (IOException ioe) {
            sd.unlock();
            throw ioe;
        }
        dataDirStates.put(sd, curState);
    }
    return isFormatted;
}
Also used : StorageState(org.apache.hadoop.hdfs.server.common.Storage.StorageState) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)
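
Callers hand in an empty map and read the per-directory states back out afterwards, as BootstrapStandby.doPreUpgrade does in Example 2. A stripped-down sketch of that calling pattern; the NNStorage instance is assumed to be initialized elsewhere, and StartupOption.REGULAR is used here only for illustration:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;

class RecoverStorageDirsSketch {
    boolean inspect(NNStorage storage) throws IOException {
        Map<StorageDirectory, StorageState> states = new HashMap<>();
        boolean anyFormatted =
                FSImage.recoverStorageDirs(StartupOption.REGULAR, storage, states);
        // dump the state each storage directory ended up in
        for (Map.Entry<StorageDirectory, StorageState> e : states.entrySet()) {
            System.out.println(e.getKey().getRoot() + " -> " + e.getValue());
        }
        return anyFormatted;
    }
}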

Example 4 with InconsistentFSStateException

Use of org.apache.hadoop.hdfs.server.common.InconsistentFSStateException in project hadoop by apache.

In the class DataStorage, the method setFieldsFromProperties:

private void setFieldsFromProperties(Properties props, StorageDirectory sd, boolean overrideLayoutVersion, int toLayoutVersion) throws IOException {
    if (overrideLayoutVersion) {
        this.layoutVersion = toLayoutVersion;
    } else {
        setLayoutVersion(props, sd);
    }
    setcTime(props, sd);
    checkStorageType(props, sd);
    setClusterId(props, layoutVersion, sd);
    // Read NamespaceID in versions before federation
    if (!DataNodeLayoutVersion.supports(LayoutVersion.Feature.FEDERATION, layoutVersion)) {
        setNamespaceID(props, sd);
    }
    // a storage id entry must be present, though its value may be empty
    String ssid = props.getProperty("storageID");
    if (ssid == null) {
        throw new InconsistentFSStateException(sd.getRoot(), "file " + STORAGE_FILE_VERSION + " is invalid.");
    }
    String sid = sd.getStorageUuid();
    if (!(sid == null || sid.equals("") || ssid.equals("") || sid.equals(ssid))) {
        throw new InconsistentFSStateException(sd.getRoot(), "has incompatible storage Id.");
    }
    if (sid == null) {
        // update id only if it was null
        sd.setStorageUuid(ssid);
    }
    // Update the datanode UUID if present.
    if (props.getProperty("datanodeUuid") != null) {
        String dnUuid = props.getProperty("datanodeUuid");
        if (getDatanodeUuid() == null) {
            setDatanodeUuid(dnUuid);
        } else if (getDatanodeUuid().compareTo(dnUuid) != 0) {
            throw new InconsistentFSStateException(sd.getRoot(), "Root " + sd.getRoot()
                    + ": DatanodeUuid=" + dnUuid + ", does not match "
                    + getDatanodeUuid() + " from other StorageDirectory.");
        }
    }
}
Also used : InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)
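
The props object is the parsed VERSION file of the storage directory. A minimal sketch of the two storageID checks above, run against a hand-built Properties instance standing in for a parsed VERSION file (the path and id values are hypothetical):

import java.io.File;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;

public class StorageIdCheckSketch {
    public static void main(String[] args) throws IOException {
        File root = new File("/data/dn1");         // hypothetical storage root
        Properties props = new Properties();       // stands in for a parsed VERSION file
        props.setProperty("storageID", "DS-1234"); // hypothetical id
        String ssid = props.getProperty("storageID");
        if (ssid == null) {
            // the entry must exist, even if empty
            throw new InconsistentFSStateException(root, "file VERSION is invalid.");
        }
        String sid = null; // no previously assigned storage id in this sketch
        if (!(sid == null || sid.equals("") || ssid.equals("") || sid.equals(ssid))) {
            // a non-empty id on both sides must match
            throw new InconsistentFSStateException(root, "has incompatible storage Id.");
        }
        System.out.println("accepted storage id: " + ssid);
    }
}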

Example 5 with InconsistentFSStateException

Use of org.apache.hadoop.hdfs.server.common.InconsistentFSStateException in project hadoop by apache.

In the class BlockPoolSliceStorage, the method doRollback:

/*
   * Roll back to old snapshot at the block pool level
   * If previous directory exists: 
   * <ol>
   * <li>Rename <SD>/current/<bpid>/current to removed.tmp</li>
   * <li>Rename <SD>/current/<bpid>/previous to current</li>
   * <li>Remove removed.tmp</li>
   * </ol>
   * 
   * Do nothing if previous directory does not exist.
   * @param bpSd Block pool storage directory at <SD>/current/<bpid>
   */
void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
    File prevDir = bpSd.getPreviousDir();
    // regular startup if previous dir does not exist
    if (!prevDir.exists())
        return;
    // read attributes out of the VERSION file of previous directory
    BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
    prevInfo.readPreviousVersionProperties(bpSd);
    // We allow rollback to a state that is either consistent with the
    // namespace state or can be further upgraded to it; that is, rollback
    // is possible only when (storedLV >= software LV)
    // && (DN.previousCTime <= NN.ctime)
    if (!(prevInfo.getLayoutVersion() >= HdfsServerConstants.DATANODE_LAYOUT_VERSION
            && prevInfo.getCTime() <= nsInfo.getCTime())) {
        // cannot rollback
        throw new InconsistentFSStateException(bpSd.getRoot(),
                "Cannot rollback to a newer state.\nDatanode previous state: LV = "
                        + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
                        + " is newer than the namespace state: LV = "
                        + HdfsServerConstants.DATANODE_LAYOUT_VERSION
                        + " CTime = " + nsInfo.getCTime());
    }
    LOG.info("Rolling back storage directory " + bpSd.getRoot() + ".\n   target LV = " + nsInfo.getLayoutVersion() + "; target CTime = " + nsInfo.getCTime());
    File tmpDir = bpSd.getRemovedTmp();
    assert !tmpDir.exists() : "removed.tmp directory must not exist.";
    // 1. rename current to tmp
    File curDir = bpSd.getCurrentDir();
    assert curDir.exists() : "Current directory must exist.";
    rename(curDir, tmpDir);
    // 2. rename previous to current
    rename(prevDir, curDir);
    // 3. delete removed.tmp dir
    deleteDir(tmpDir);
    LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
}
Also used : File(java.io.File) InconsistentFSStateException(org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)
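
The rename order is what makes the rollback safe to interrupt: current is parked as removed.tmp before previous is promoted, so no step leaves the block pool without a usable directory. A standalone sketch of the same three-step sequence using java.nio.file; the base path is hypothetical:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.stream.Stream;

public class RollbackRenameSketch {
    public static void main(String[] args) throws IOException {
        Path base = Paths.get("/tmp/bp-storage"); // hypothetical <SD>/current/<bpid>
        Path current = base.resolve("current");
        Path previous = base.resolve("previous");
        Path removedTmp = base.resolve("removed.tmp");
        if (!Files.exists(previous)) {
            return; // regular startup: nothing to roll back
        }
        // 1. rename current to removed.tmp
        Files.move(current, removedTmp, StandardCopyOption.ATOMIC_MOVE);
        // 2. rename previous to current
        Files.move(previous, current, StandardCopyOption.ATOMIC_MOVE);
        // 3. delete removed.tmp, children first
        try (Stream<Path> walk = Files.walk(removedTmp)) {
            walk.sorted(Comparator.reverseOrder())
                .forEach(p -> p.toFile().delete());
        }
    }
}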

Aggregations

InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 8 uses
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 4 uses
File (java.io.File): 3 uses
IOException (java.io.IOException): 3 uses
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 3 uses
RandomAccessFile (java.io.RandomAccessFile): 1 use
URI (java.net.URI): 1 use
HashMap (java.util.HashMap): 1 use
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 use
Util.fileAsURI (org.apache.hadoop.hdfs.server.common.Util.fileAsURI): 1 use
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 1 use
MD5Hash (org.apache.hadoop.io.MD5Hash): 1 use
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 1 use
Test (org.junit.Test): 1 use