Example 11 with MiniQJMHACluster

Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testRollbackWithJournalNodes:

@Test
public void testRollbackWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.getDfsBuilder().numDataNodes(0);
        qjCluster = builder.build();
        MiniDFSCluster cluster = qjCluster.getDfsCluster();
        // No upgrade is in progress at the moment.
        checkClusterPreviousDirExistence(cluster, false);
        assertCTimesEqual(cluster);
        checkJnPreviousDirExistence(qjCluster, false);
        // Transition NN0 to active and do some FS ops.
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.shutdownNameNode(1);
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        cluster.restartNameNode(0, false);
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
        // NN0 should come up in the active state when given the -upgrade option,
        // so no need to transition it to active.
        assertTrue(fs.mkdirs(new Path("/foo2")));
        final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
        assertTrue(cidDuringUpgrade > cidBeforeUpgrade);
        // Now bootstrap the standby with the upgraded info.
        int rc = BootstrapStandby.run(new String[] { "-force" }, cluster.getConfiguration(1));
        assertEquals(0, rc);
        cluster.restartNameNode(1);
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, true);
        checkJnPreviousDirExistence(qjCluster, true);
        assertCTimesEqual(cluster);
        // Shut down the NNs, but deliberately leave the JNs up and running.
        Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
        cluster.shutdown();
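        // Point the configuration at NN0's local name dirs and run an offline
        // rollback, which rolls back both the local storage and the shared journal.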
        conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
        NameNode.doRollback(conf, false);
        final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
        assertTrue(cidBeforeUpgrade < cidAfterRollback);
        // make sure the committedTxnId has been reset correctly after rollback
        assertTrue(cidDuringUpgrade > cidAfterRollback);
        // The rollback operation should have rolled back the first NN's local
        // dirs, and the shared dir, but not the other NN's dirs. Those have to be
        // done by bootstrapping the standby.
        checkNnPreviousDirExistence(cluster, 0, false);
        checkJnPreviousDirExistence(qjCluster, false);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder), MiniQJMHACluster (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster), URI (java.net.URI), Test (org.junit.Test)

Example 12 with MiniQJMHACluster

Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testUpgradeWithJournalNodes:

/**
   * Make sure that an HA NN can successfully upgrade when configured using
   * JournalNodes.
   */
@Test
public void testUpgradeWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.getDfsBuilder().numDataNodes(0);
        qjCluster = builder.build();
        MiniDFSCluster cluster = qjCluster.getDfsCluster();
        // No upgrade is in progress at the moment.
        checkJnPreviousDirExistence(qjCluster, false);
        checkClusterPreviousDirExistence(cluster, false);
        assertCTimesEqual(cluster);
        // Transition NN0 to active and do some FS ops.
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        // get the value of the committedTxnId in journal nodes
        final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.shutdownNameNode(1);
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        cluster.restartNameNode(0, false);
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
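        // Starting the upgrade must not lose any transactions already committed to the JNs.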
        assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));
        // NN0 should come up in the active state when given the -upgrade option,
        // so no need to transition it to active.
        assertTrue(fs.mkdirs(new Path("/foo2")));
        // Restart NN0 without the -upgrade flag, to make sure that works.
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
        cluster.restartNameNode(0, false);
        // Make sure we can still do FS ops after upgrading.
        cluster.transitionToActive(0);
        assertTrue(fs.mkdirs(new Path("/foo3")));
        assertTrue(getCommittedTxnIdValue(qjCluster) > cidBeforeUpgrade);
        // Now bootstrap the standby with the upgraded info.
        int rc = BootstrapStandby.run(new String[] { "-force" }, cluster.getConfiguration(1));
        assertEquals(0, rc);
        // Now restart NN1 and make sure that we can do ops against that as well.
        cluster.restartNameNode(1);
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        assertTrue(fs.mkdirs(new Path("/foo4")));
        assertCTimesEqual(cluster);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder), MiniQJMHACluster (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster), Test (org.junit.Test)

Example 13 with MiniQJMHACluster

Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testFinalizeFromSecondNameNodeWithJournalNodes:

/**
   * Make sure that finalization is allowed even if the NN which initiated the
   * upgrade is in the standby state.
   */
@Test
public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.getDfsBuilder().numDataNodes(0);
        qjCluster = builder.build();
        MiniDFSCluster cluster = qjCluster.getDfsCluster();
        // No upgrade is in progress at the moment.
        checkJnPreviousDirExistence(qjCluster, false);
        checkClusterPreviousDirExistence(cluster, false);
        assertCTimesEqual(cluster);
        // Transition NN0 to active and do some FS ops.
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.shutdownNameNode(1);
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        cluster.restartNameNode(0, false);
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
        // Now bootstrap the standby with the upgraded info.
        int rc = BootstrapStandby.run(new String[] { "-force" }, cluster.getConfiguration(1));
        assertEquals(0, rc);
        cluster.restartNameNode(1);
        // Make the second NN (not the one that initiated the upgrade) active when
        // the finalize command is run.
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        runFinalizeCommand(cluster);
        checkClusterPreviousDirExistence(cluster, false);
        checkJnPreviousDirExistence(qjCluster, false);
        assertCTimesEqual(cluster);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder), MiniQJMHACluster (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster), Test (org.junit.Test)

Example 14 with MiniQJMHACluster

Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testFinalizeWithJournalNodes:

@Test
public void testFinalizeWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.getDfsBuilder().numDataNodes(0);
        qjCluster = builder.build();
        MiniDFSCluster cluster = qjCluster.getDfsCluster();
        // No upgrade is in progress at the moment.
        checkJnPreviousDirExistence(qjCluster, false);
        checkClusterPreviousDirExistence(cluster, false);
        assertCTimesEqual(cluster);
        // Transition NN0 to active and do some FS ops.
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.shutdownNameNode(1);
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        cluster.restartNameNode(0, false);
        assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));
        assertTrue(fs.mkdirs(new Path("/foo2")));
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
        // Now bootstrap the standby with the upgraded info.
        int rc = BootstrapStandby.run(new String[] { "-force" }, cluster.getConfiguration(1));
        assertEquals(0, rc);
        cluster.restartNameNode(1);
        final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
        assertTrue(cidDuringUpgrade > cidBeforeUpgrade);
        runFinalizeCommand(cluster);
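        // Finalizing the upgrade must not change the committedTxnId stored on the JNs.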
        assertEquals(cidDuringUpgrade, getCommittedTxnIdValue(qjCluster));
        checkClusterPreviousDirExistence(cluster, false);
        checkJnPreviousDirExistence(qjCluster, false);
        assertCTimesEqual(cluster);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder), MiniQJMHACluster (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster), Test (org.junit.Test)

Aggregations

MiniQJMHACluster (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster): 14 usages
Configuration (org.apache.hadoop.conf.Configuration): 10 usages
Path (org.apache.hadoop.fs.Path): 10 usages
Test (org.junit.Test): 10 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 5 usages
RollingUpgradeInfo (org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo): 5 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4 usages
EventBatch (org.apache.hadoop.hdfs.inotify.EventBatch): 4 usages
Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder): 4 usages
Event (org.apache.hadoop.hdfs.inotify.Event): 2 usages
File (java.io.File): 1 usage
IOException (java.io.IOException): 1 usage
OutputStream (java.io.OutputStream): 1 usage
URI (java.net.URI): 1 usage
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 1 usage
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 usage
FSImage (org.apache.hadoop.hdfs.server.namenode.FSImage): 1 usage
NNStorage (org.apache.hadoop.hdfs.server.namenode.NNStorage): 1 usage
ExitUtil (org.apache.hadoop.util.ExitUtil): 1 usage
Before (org.junit.Before): 1 usage
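
All four examples above follow the same lifecycle: build the cluster with MiniQJMHACluster.Builder, drive the embedded MiniDFSCluster, and shut everything down in a finally block. Below is a minimal sketch of that shared pattern, assuming the same Hadoop test utilities used in the examples (HATestUtil ships with the hadoop-hdfs test sources); the class name and the /example path are illustrative and not taken from the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;

public class MiniQJMHAClusterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniQJMHACluster qjCluster = null;
        FileSystem fs = null;
        try {
            // Build an HA NameNode pair backed by JournalNodes; no DataNodes are
            // needed for namespace-only operations such as mkdirs.
            MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
            builder.getDfsBuilder().numDataNodes(0);
            qjCluster = builder.build();
            MiniDFSCluster cluster = qjCluster.getDfsCluster();

            // Make NN0 active and talk to the cluster through a failover-aware client.
            cluster.transitionToActive(0);
            fs = HATestUtil.configureFailoverFs(cluster, conf);
            fs.mkdirs(new Path("/example"));
        } finally {
            // Always close the client and tear down the NNs and JNs.
            if (fs != null) {
                fs.close();
            }
            if (qjCluster != null) {
                qjCluster.shutdown();
            }
        }
    }
}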