Search in sources:

Example 21 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestEditLog, method assertExistsInStorageDirs:

private void assertExistsInStorageDirs(MiniDFSCluster cluster, NameNodeDirType dirType, String filename) {
    NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(dirType)) {
        File f = new File(sd.getCurrentDir(), filename);
        assertTrue("Expect that " + f + " exists", f.exists());
    }
}
Also used: StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File)
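
As a rough usage sketch (not taken from TestEditLog itself; the transaction id and the choice of file name helper are illustrative), the assertion might be invoked like this after rolling the edit log:

// Hypothetical call site: check that every EDITS directory contains the
// in-progress edit log segment starting at transaction 1.
assertExistsInStorageDirs(cluster, NameNodeDirType.EDITS,
    NNStorage.getInProgressEditsFileName(1));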

Example 22 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestCheckpoint, method testSecondaryImageDownload:

/**
   * Test that the secondary doesn't have to re-download the image
   * if it hasn't changed.
   */
@Test
public void testSecondaryImageDownload() throws IOException {
    LOG.info("Starting testSecondaryImageDownload");
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    Path dir = new Path("/checkpoint");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    FSImage image = cluster.getNameNode().getFSImage();
    SecondaryNameNode secondary = null;
    try {
        assertFalse(fileSys.exists(dir));
        //
        // Make the checkpoint
        //
        secondary = startSecondaryNameNode(conf);
        File secondaryDir = MiniDFSCluster.getCheckpointDirectory(MiniDFSCluster.getBaseDirectory(), 0, 0)[0];
        File secondaryCurrent = new File(secondaryDir, "current");
        long expectedTxIdToDownload = cluster.getNameNode().getFSImage().getStorage().getMostRecentCheckpointTxId();
        File secondaryFsImageBefore = new File(secondaryCurrent, NNStorage.getImageFileName(expectedTxIdToDownload));
        File secondaryFsImageAfter = new File(secondaryCurrent, NNStorage.getImageFileName(expectedTxIdToDownload + 2));
        assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists", secondaryFsImageBefore.exists());
        assertTrue("Secondary should have loaded an image", secondary.doCheckpoint());
        assertTrue("Secondary should have downloaded original image", secondaryFsImageBefore.exists());
        assertTrue("Secondary should have created a new image", secondaryFsImageAfter.exists());
        long fsimageLength = secondaryFsImageBefore.length();
        assertEquals("Image size should not have changed", fsimageLength, secondaryFsImageAfter.length());
        // change namespace
        fileSys.mkdirs(dir);
        assertFalse("Another checkpoint should not have to re-load image", secondary.doCheckpoint());
        for (StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
            File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE, expectedTxIdToDownload + 5);
            assertTrue("Image size increased", imageFile.length() > fsimageLength);
        }
    } finally {
        fileSys.close();
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
    }
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) Test(org.junit.Test)
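
The expected file names above come from NNStorage.getImageFileName(txid). A minimal sketch of that naming convention, assuming the 19-digit zero padding used for checkpoint artifacts (the padding width is our assumption about the implementation, not something shown in this snippet):

// Sketch of the fsimage naming scheme the test relies on; the 19-digit
// zero padding is an assumption about NNStorage's formatting.
static String imageFileName(long txid) {
    return String.format("fsimage_%019d", txid);
}
// e.g. imageFileName(42) -> "fsimage_0000000000000000042"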

Example 23 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestCheckpoint, method testSeparateEditsDirLocking:

/**
   * Test that, if the edits dir is separate from the name dir, it is
   * properly locked.
   **/
@Test
public void testSeparateEditsDirLocking() throws IOException {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    File editsDir = new File(MiniDFSCluster.getBaseDirectory(), "testSeparateEditsDirLocking");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDir.getAbsolutePath());
    MiniDFSCluster cluster = null;
    // Start a NN, and verify that lock() fails in all of the configured
    // directories
    StorageDirectory savedSd = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false).numDataNodes(0).build();
        NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
        for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
            assertEquals(editsDir.getAbsoluteFile(), sd.getRoot());
            assertLockFails(sd);
            savedSd = sd;
        }
    } finally {
        cleanup(cluster);
        cluster = null;
    }
    assertNotNull(savedSd);
    // Lock one of the saved directories, then start the NN, and make sure it
    // fails to start
    assertClusterStartFailsWhenDirLocked(conf, savedSd);
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) Test(org.junit.Test)
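
assertLockFails is a TestCheckpoint helper not shown in this listing; a plausible reconstruction of its behavior, given how StorageDirectory.lock() and isLockSupported() are used elsewhere in this class (this is a sketch, not the verbatim helper):

// Reconstruction: taking a second lock on a directory the NameNode
// already holds should throw IOException, except on filesystems where
// file locking is unsupported.
private static void assertLockFails(StorageDirectory sd) {
    try {
        sd.lock();
        // No exception: locking must be unsupported on this filesystem.
        assertFalse("Expected lock on " + sd + " to fail", sd.isLockSupported());
        sd.unlock();
    } catch (IOException ioe) {
        // expected: the directory is already locked
    }
}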

Example 24 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestCheckpoint, method testSecondaryNameNodeLocking:

/**
   * Test that the SecondaryNameNode properly locks its storage directories.
   */
@Test
public void testSecondaryNameNodeLocking() throws Exception {
    // Start a primary NN so that the secondary will start successfully
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        StorageDirectory savedSd = null;
        // Start a secondary NN, then make sure that all of its storage
        // dirs got locked.
        secondary = startSecondaryNameNode(conf);
        NNStorage storage = secondary.getFSImage().getStorage();
        for (StorageDirectory sd : storage.dirIterable(null)) {
            assertLockFails(sd);
            savedSd = sd;
        }
        LOG.info("===> Shutting down first 2NN");
        secondary.shutdown();
        secondary = null;
        LOG.info("===> Locking a dir, starting second 2NN");
        // Lock one of its dirs, make sure it fails to start
        LOG.info("Trying to lock" + savedSd);
        savedSd.lock();
        try {
            secondary = startSecondaryNameNode(conf);
            assertFalse("Should fail to start 2NN when " + savedSd + " is locked", savedSd.isLockSupported());
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("already locked", ioe);
        } finally {
            savedSd.unlock();
        }
    } finally {
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) Test(org.junit.Test)
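
The cleanup(...) calls in the finally blocks are likewise TestCheckpoint helpers defined elsewhere; a sketch of the intent (the real helpers may additionally catch and log shutdown exceptions):

// Sketch of the null-safe shutdown helpers used in the finally blocks.
private static void cleanup(SecondaryNameNode snn) {
    if (snn != null) {
        snn.shutdown();
    }
}

private static void cleanup(MiniDFSCluster cluster) {
    if (cluster != null) {
        cluster.shutdown();
    }
}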

Example 25 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestCheckpoint, method testDeleteTemporaryEditsOnStartup:

/**
   * Test that the secondary namenode correctly deletes temporary edits
   * on startup.
   */
@Test(timeout = 60000)
public void testDeleteTemporaryEditsOnStartup() throws IOException {
    Configuration conf = new HdfsConfiguration();
    SecondaryNameNode secondary = null;
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        secondary = startSecondaryNameNode(conf);
        DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short) 1, 0L);
        secondary.doCheckpoint();
        // Cause edit rename to fail during next checkpoint
        Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
        DFSTestUtil.createFile(fs, new Path("tmpfile1"), 1024, (short) 1, 0L);
        try {
            secondary.doCheckpoint();
            fail("Fault injection failed.");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("Injecting failure before edit rename", ioe);
        }
        Mockito.reset(faultInjector);
        // Verify that a temp edits file is present
        for (StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
            File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
            assertTrue("Expected a single tmp edits file in directory " + sd.toString(), tmpEdits.length == 1);
        }
        // Restart 2NN
        secondary.shutdown();
        secondary = startSecondaryNameNode(conf);
        // Verify that tmp files were deleted
        for (StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
            File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
            assertTrue("Did not expect a tmp edits file in directory " + sd.toString(), tmpEdits.length == 0);
        }
        // Next checkpoint should succeed
        secondary.doCheckpoint();
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
        if (fs != null) {
            fs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
        Mockito.reset(faultInjector);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) IOException(java.io.IOException) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) Test(org.junit.Test)
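
tmpEditsFilter is a FilenameFilter field of TestCheckpoint that is not shown in this snippet. A plausible sketch, assuming temporary edits files are named with the NameNodeFile.EDITS_TMP prefix (that prefix is our assumption about how NNStorage names them; requires java.io.FilenameFilter):

// Reconstruction of tmpEditsFilter: accept the temporary edits files
// left behind in current/ by an interrupted checkpoint.
static final FilenameFilter tmpEditsFilter = new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
        return name.startsWith(NameNodeFile.EDITS_TMP.getName());
    }
};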

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 83 usages
File (java.io.File): 59 usages
Test (org.junit.Test): 45 usages
RandomAccessFile (java.io.RandomAccessFile): 29 usages
IOException (java.io.IOException): 24 usages
Configuration (org.apache.hadoop.conf.Configuration): 22 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 21 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 20 usages
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 19 usages
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 15 usages
URI (java.net.URI): 11 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 11 usages
Path (org.apache.hadoop.fs.Path): 10 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 usages
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 7 usages
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile): 6 usages
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 5 usages
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec): 5 usages
ArrayList (java.util.ArrayList): 4 usages
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 4 usages