Search in sources :

Example 1 with SecondaryNameNode

use of org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode in project hadoop by apache.

From the class TestHAConfiguration, method testSecondaryNameNodeDoesNotStart:

/**
   * Test that the 2NN does not start if given a config with HA NNs.
   */
/**
 * Verifies that constructing a SecondaryNameNode fails with an IOException
 * when the configuration describes an HA cluster (the 2NN is unsupported
 * alongside HA NameNodes).
 */
@Test
public void testSecondaryNameNodeDoesNotStart() throws IOException {
    // The nameservice Id is deliberately left unset here: HA detection must
    // work even when the nameservice Id is not explicitly configured.
    Configuration haConf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
    try {
        new SecondaryNameNode(haConf);
        fail("Created a 2NN with an HA config");
    } catch (IOException ioe) {
        // Expected: the 2NN must refuse to start in an HA setup.
        GenericTestUtils.assertExceptionContains(
            "Cannot use SecondaryNameNode in an HA cluster", ioe);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) SecondaryNameNode(org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode) IOException(java.io.IOException) Test(org.junit.Test)

Example 2 with SecondaryNameNode

use of org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode in project hadoop by apache.

From the class TestCheckpointsWithSnapshots, method testCheckpoint:

/**
   * Regression test for HDFS-5433 - "When reloading fsimage during
   * checkpointing, we should clear existing snapshottable directories"
   */
/**
 * Regression test for HDFS-5433 - "When reloading fsimage during
 * checkpointing, we should clear existing snapshottable directories".
 *
 * Walks a NN + 2NN pair through create-snapshot / checkpoint /
 * delete-snapshot / saveNamespace / checkpoint, asserting after each step
 * that both SnapshotManagers report the expected snapshot and
 * snapshottable-directory counts.
 */
@Test
public void testCheckpoint() throws IOException {
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        secondary = new SecondaryNameNode(conf);
        SnapshotManager nnSnapshotManager = cluster.getNamesystem().getSnapshotManager();
        SnapshotManager secondarySnapshotManager = secondary.getFSNamesystem().getSnapshotManager();
        FileSystem fs = cluster.getFileSystem();
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        // Both NN and 2NN start out with no snapshot state at all.
        assertEquals(0, nnSnapshotManager.getNumSnapshots());
        assertEquals(0, nnSnapshotManager.getNumSnapshottableDirs());
        assertEquals(0, secondarySnapshotManager.getNumSnapshots());
        assertEquals(0, secondarySnapshotManager.getNumSnapshottableDirs());
        // 1. Create a snapshottable directory foo on the NN.
        fs.mkdirs(TEST_PATH);
        admin.allowSnapshot(TEST_PATH);
        assertEquals(0, nnSnapshotManager.getNumSnapshots());
        assertEquals(1, nnSnapshotManager.getNumSnapshottableDirs());
        // 2. Create a snapshot of the dir foo. This will be referenced both in
        // the SnapshotManager as well as in the file system tree. The snapshot
        // count will go up to 1.
        Path snapshotPath = fs.createSnapshot(TEST_PATH);
        assertEquals(1, nnSnapshotManager.getNumSnapshots());
        assertEquals(1, nnSnapshotManager.getNumSnapshottableDirs());
        // 3. Start up a 2NN and have it do a checkpoint. It will have foo and its
        // snapshot in its list of snapshottable dirs referenced from the
        // SnapshotManager, as well as in the file system tree.
        secondary.doCheckpoint();
        assertEquals(1, secondarySnapshotManager.getNumSnapshots());
        assertEquals(1, secondarySnapshotManager.getNumSnapshottableDirs());
        // 4. Disallow snapshots on and delete foo on the NN. The snapshot count
        // will go down to 0 and the snapshottable dir will be removed from the fs
        // tree.
        fs.deleteSnapshot(TEST_PATH, snapshotPath.getName());
        admin.disallowSnapshot(TEST_PATH);
        assertEquals(0, nnSnapshotManager.getNumSnapshots());
        assertEquals(0, nnSnapshotManager.getNumSnapshottableDirs());
        // 5. Have the NN do a saveNamespace, writing out a new fsimage with
        // snapshot count 0.
        NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
        NameNodeAdapter.saveNamespace(cluster.getNameNode());
        NameNodeAdapter.leaveSafeMode(cluster.getNameNode());
        // 6. Have the still-running 2NN do a checkpoint. It will notice that the
        // fsimage has changed on the NN and redownload/reload from that image.
        // This will replace all INodes in the file system tree as well as reset
        // the snapshot counter to 0 in the SnapshotManager. However, it will not
        // clear the list of snapshottable dirs referenced from the
        // SnapshotManager. When it writes out an fsimage, the 2NN will write out
        // 0 for the snapshot count, but still serialize the snapshottable dir
        // referenced in the SnapshotManager even though it no longer appears in
        // the file system tree. The NN will not be able to start up with this.
        secondary.doCheckpoint();
        assertEquals(0, secondarySnapshotManager.getNumSnapshots());
        assertEquals(0, secondarySnapshotManager.getNumSnapshottableDirs());
    } finally {
        // Shut the 2NN down before the cluster so the secondary is never
        // left running against a NameNode that has already gone away.
        if (secondary != null) {
            secondary.shutdown();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) SecondaryNameNode(org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)

Example 3 with SecondaryNameNode

use of org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode in project hadoop by apache.

From the class TestRollingUpgrade, method testCheckpointWithSNN:

/**
   * In non-HA setup, after rolling upgrade prepare, the Secondary NN should
   * still be able to do checkpoint
   */
/**
 * In non-HA setup, after rolling upgrade prepare, the Secondary NN should
 * still be able to do checkpoint.
 */
@Test
public void testCheckpointWithSNN() throws Exception {
    MiniDFSCluster cluster = null;
    DistributedFileSystem dfs = null;
    SecondaryNameNode secondary = null;
    try {
        Configuration clusterConf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(clusterConf).build();
        cluster.waitActive();
        // Bind the 2NN's HTTP server to an ephemeral port so the test does
        // not collide with any fixed address.
        clusterConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        secondary = new SecondaryNameNode(clusterConf);
        dfs = cluster.getFileSystem();
        // Baseline: a checkpoint before the upgrade succeeds.
        dfs.mkdirs(new Path("/test/foo"));
        secondary.doCheckpoint();
        // Start a rolling upgrade (prepare requires safe mode on entry).
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        dfs.mkdirs(new Path("/test/bar"));
        // The 2NN must still be able to checkpoint after upgrade prepare.
        secondary.doCheckpoint();
    } finally {
        IOUtils.cleanup(null, dfs);
        if (secondary != null) {
            secondary.shutdown();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) SecondaryNameNode(org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode) Test(org.junit.Test)

Aggregations

SecondaryNameNode (org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode)3 Test (org.junit.Test)3 Configuration (org.apache.hadoop.conf.Configuration)2 Path (org.apache.hadoop.fs.Path)2 IOException (java.io.IOException)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)1 HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin)1