
Example 66 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class FSImageTestUtil, method createStandaloneEditLog:

/**
   * Return a standalone instance of FSEditLog that will log into the given
   * log directory. The returned instance has its journals initialized for
   * write, but no log segment has been started yet.
   */
public static FSEditLog createStandaloneEditLog(File logDir) throws IOException {
    assertTrue(logDir.mkdirs() || logDir.exists());
    if (!FileUtil.fullyDeleteContents(logDir)) {
        throw new IOException("Unable to delete contents of " + logDir);
    }
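    // Mock an NNStorage whose only EDITS directory is backed by logDir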
    NNStorage storage = Mockito.mock(NNStorage.class);
    StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);
    List<StorageDirectory> sds = Lists.newArrayList(sd);
    Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
    Mockito.doReturn(sd).when(storage).getStorageDirectory(Matchers.<URI>anyObject());
    FSEditLog editLog = new FSEditLog(new Configuration(), storage, ImmutableList.of(logDir.toURI()));
    editLog.initJournalsForWrite();
    return editLog;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)
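A minimal usage sketch (not part of the original page) showing how a test might obtain and release the standalone log; the directory location and the isOpenForWrite assertion are assumptions:

File logDir = new File(System.getProperty("java.io.tmpdir"), "standalone-edits");
FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(logDir);
try {
    // initJournalsForWrite() was already called by the factory method, so the
    // log sits between segments and should report itself as open for write.
    assertTrue(editLog.isOpenForWrite());
} finally {
    editLog.close();
}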

Example 67 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class FSImageTestUtil, method mockStorageDirectory:

/**
   * Make a mock storage directory that returns some set of file contents.
   * @param type type of storage dir
   * @param previousExists should we mock that the previous/ dir exists?
   * @param fileNames the names of files contained in current/
   */
static StorageDirectory mockStorageDirectory(StorageDirType type, boolean previousExists, String... fileNames) {
    StorageDirectory sd = mock(StorageDirectory.class);
    doReturn(type).when(sd).getStorageDirType();
    // Version file should always exist
    doReturn(mockFile(true)).when(sd).getVersionFile();
    doReturn(mockFile(true)).when(sd).getRoot();
    // Previous dir optionally exists
    doReturn(mockFile(previousExists)).when(sd).getPreviousDir();
    // Return a mock 'current' directory which has the given paths
    File[] files = new File[fileNames.length];
    for (int i = 0; i < fileNames.length; i++) {
        files[i] = new File(fileNames[i]);
    }
    File mockDir = Mockito.spy(new File("/dir/current"));
    doReturn(files).when(mockDir).listFiles();
    doReturn(mockDir).when(sd).getCurrentDir();
    return sd;
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) File(java.io.File)
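To show what the mock yields, here is a short consumption sketch (assumed, not from the page); the image file name is illustrative, and mockFile(boolean) is taken to stub File.exists():

StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
    NameNodeDirType.IMAGE, false, "/dir/current/fsimage_0000000000000000005");
// The mocked current/ dir reports exactly the files passed in above.
for (File f : sd.getCurrentDir().listFiles()) {
    System.out.println(f.getName()); // fsimage_0000000000000000005
}
assertFalse(sd.getPreviousDir().exists()); // previousExists was false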

Example 68 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class TestCheckPointForSecurityTokens, method testSaveNamespace:

/**
   * Tests save namespace.
   */
@Test
public void testSaveNamespace() throws IOException {
    DistributedFileSystem fs = null;
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        FSNamesystem namesystem = cluster.getNamesystem();
        String renewer = UserGroupInformation.getLoginUser().getUserName();
        Token<DelegationTokenIdentifier> token1 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token2 = namesystem.getDelegationToken(new Text(renewer));
        // Prepare the saveNamespace admin command (saving the image outside
        // safe mode would fail; it is run below once safe mode is entered)
        DFSAdmin admin = new DFSAdmin(conf);
        String[] args = new String[] { "-saveNamespace" };
        // verify that the edits file is NOT empty
        NameNode nn = cluster.getNameNode();
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should have 5 transactions", 5, numTransactions);
            ;
        }
        // Saving image in safe mode should succeed
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            admin.run(args);
        } catch (Exception e) {
            throw new IOException(e.getMessage());
        }
        // verify that the edits file is empty except for the START txn
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should only have START txn", 1, numTransactions);
        }
        // restart cluster
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        //Should be able to renew & cancel the delegation token after cluster restart
        try {
            renewToken(token1);
            renewToken(token2);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token3 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token4 = namesystem.getDelegationToken(new Text(renewer));
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token5 = namesystem.getDelegationToken(new Text(renewer));
        try {
            renewToken(token1);
            renewToken(token2);
            renewToken(token3);
            renewToken(token4);
            renewToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        try {
            renewToken(token1);
            cancelToken(token1);
            renewToken(token2);
            cancelToken(token2);
            renewToken(token3);
            cancelToken(token3);
            renewToken(token4);
            cancelToken(token4);
            renewToken(token5);
            cancelToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
    } finally {
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) Configuration (org.apache.hadoop.conf.Configuration) DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) Text (org.apache.hadoop.io.Text) StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException (java.io.IOException) DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin) Test (org.junit.Test)
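The renewToken and cancelToken helpers are referenced but not shown in this excerpt. A plausible sketch, assuming they simply delegate to the Token API with the test configuration (the config field is an assumption):

// Assumed helper implementations; Token.renew/cancel contact the NameNode's
// delegation token secret manager over RPC.
private void renewToken(Token<DelegationTokenIdentifier> token)
        throws IOException, InterruptedException {
    token.renew(config);
}

private void cancelToken(Token<DelegationTokenIdentifier> token)
        throws IOException, InterruptedException {
    token.cancel(config);
}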

Example 69 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class TestCheckpoint, method testWriteTransactionIdHandlesIOE:

/**
   * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
   * correctly (by removing the storage directory)
   * See https://issues.apache.org/jira/browse/HDFS-2011
   */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
    LOG.info("Check IOException handled correctly by writeTransactionIdFile");
    ArrayList<URI> fsImageDirs = new ArrayList<URI>();
    ArrayList<URI> editsDirs = new ArrayList<URI>();
    File filePath = new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
    assertTrue("Couldn't create directory storageDirToCheck", filePath.exists() || filePath.mkdirs());
    fsImageDirs.add(filePath.toURI());
    editsDirs.add(filePath.toURI());
    NNStorage nnStorage = new NNStorage(new HdfsConfiguration(), fsImageDirs, editsDirs);
    try {
        assertTrue("List of storage directories didn't have storageDirToCheck.", nnStorage.getEditsDirectories().iterator().next().toString().indexOf("storageDirToCheck") != -1);
        assertTrue("List of removed storage directories wasn't empty", nnStorage.getRemovedStorageDirs().isEmpty());
    } finally {
        // Delete storage directory to cause IOException in writeTransactionIdFile 
        assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(), filePath.delete());
    }
    // Just call writeTransactionIdFileToStorage with an arbitrary transaction id
    nnStorage.writeTransactionIdFileToStorage(1);
    List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
    assertTrue("Removed directory wasn't what was expected", listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().toString().indexOf("storageDirToCheck") != -1);
    nnStorage.close();
}
Also used : ArrayList(java.util.ArrayList) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) URI(java.net.URI) Util.fileAsURI(org.apache.hadoop.hdfs.server.common.Util.fileAsURI) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) Test(org.junit.Test)
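Callers can observe which directories NNStorage dropped after the failure; a minimal sketch (assumed) built from the accessors already used above:

// Inspect directories that NNStorage removed after I/O errors.
for (StorageDirectory removed : nnStorage.getRemovedStorageDirs()) {
    System.err.println("Storage dir failed and was removed: " + removed.getRoot());
}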

Example 70 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class TestCheckpoint, method testCheckpointWithSeparateDirsAfterNameFails:

/**
   * Test case where the NN is configured with a name-only and an edits-only
   * dir, with storage-restore turned on. In this case, if the name-only dir
   * disappears and comes back, a new checkpoint after it has been restored
   * should function correctly.
   * @throws Exception
   */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    File currentDir = null;
    Configuration conf = new HdfsConfiguration();
    File base_dir = new File(MiniDFSCluster.getBaseDirectory());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/name-only");
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/edits-only");
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(base_dir, "namesecondary1")).toString());
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).manageNameDfsDirs(false).build();
        secondary = startSecondaryNameNode(conf);
        // Checkpoint once
        secondary.doCheckpoint();
        // Now primary NN experiences failure of its only name dir -- simulate
        // this by removing all permissions from its current dir
        NamenodeProtocols nn = cluster.getNameNodeRpc();
        NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
        StorageDirectory sd0 = storage.getStorageDir(0);
        assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
        currentDir = sd0.getCurrentDir();
        assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));
        // Trying to checkpoint now should fail, since there are no valid storage dirs
        try {
            secondary.doCheckpoint();
            fail("Did not fail to checkpoint when there are no valid storage dirs");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("No targets in destination storage", ioe);
        }
        // Restore the good dir
        assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
        nn.restoreFailedStorage("true");
        nn.rollEditLog();
        // Checkpoint again -- this should upload to the restored name dir
        secondary.doCheckpoint();
        assertNNHasCheckpoints(cluster, ImmutableList.of(8));
        assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
    } finally {
        if (currentDir != null) {
            FileUtil.chmod(currentDir.getAbsolutePath(), "755");
        }
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
    }
}
Also used : NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) Configuration (org.apache.hadoop.conf.Configuration) HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException (java.io.IOException) RandomAccessFile (java.io.RandomAccessFile) EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File (java.io.File) Test (org.junit.Test)
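For comparison, the same restore-and-roll sequence can be driven through the admin tool instead of the raw RPC interface; a hedged sketch mirroring nn.restoreFailedStorage("true") and nn.rollEditLog() above (argument strings follow the dfsadmin command names):

DFSAdmin admin = new DFSAdmin(conf);
admin.run(new String[] { "-restoreFailedStorage", "true" });
admin.run(new String[] { "-rollEdits" });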

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 83
File (java.io.File): 59
Test (org.junit.Test): 45
RandomAccessFile (java.io.RandomAccessFile): 29
IOException (java.io.IOException): 24
Configuration (org.apache.hadoop.conf.Configuration): 22
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 21
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 20
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 19
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 15
URI (java.net.URI): 11
FileSystem (org.apache.hadoop.fs.FileSystem): 11
Path (org.apache.hadoop.fs.Path): 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 7
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile): 6
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 5
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec): 5
ArrayList (java.util.ArrayList): 4
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 4