Search in sources :

Example 1 with Builder

use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testRollbackWithJournalNodes.

/**
 * Make sure that an HA setup using JournalNodes can be rolled back after an
 * upgrade: NN0 is upgraded, the standby is bootstrapped from it, and then the
 * whole cluster (NN0's local dirs plus the shared journal) is rolled back via
 * {@link NameNode#doRollback}.
 */
@Test
public void testRollbackWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.getDfsBuilder().numDataNodes(0);
        qjCluster = builder.build();
        MiniDFSCluster cluster = qjCluster.getDfsCluster();
        // No upgrade is in progress at the moment.
        checkClusterPreviousDirExistence(cluster, false);
        assertCTimesEqual(cluster);
        checkJnPreviousDirExistence(qjCluster, false);
        // Transition NN0 to active and do some FS ops.
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.shutdownNameNode(1);
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        cluster.restartNameNode(0, false);
        // Only NN0 and the JNs should have created "previous" dirs; NN1 is down.
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
        // NN0 should come up in the active state when given the -upgrade option,
        // so no need to transition it to active.
        assertTrue(fs.mkdirs(new Path("/foo2")));
        final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
        // Writes performed during the upgrade must advance the committed txid.
        assertTrue(cidDuringUpgrade > cidBeforeUpgrade);
        // Now bootstrap the standby with the upgraded info.
        int rc = BootstrapStandby.run(new String[] { "-force" }, cluster.getConfiguration(1));
        assertEquals(0, rc);
        cluster.restartNameNode(1);
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, true);
        checkJnPreviousDirExistence(qjCluster, true);
        assertCTimesEqual(cluster);
        // Shut down the NNs, but deliberately leave the JNs up and running.
        // These are NN0's name dirs (index 0) — the node we roll back below.
        Collection<URI> nn0NameDirs = cluster.getNameDirs(0);
        cluster.shutdown();
        conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn0NameDirs));
        NameNode.doRollback(conf, false);
        final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
        assertTrue(cidBeforeUpgrade < cidAfterRollback);
        // make sure the committedTxnId has been reset correctly after rollback
        assertTrue(cidDuringUpgrade > cidAfterRollback);
        // The rollback operation should have rolled back the first NN's local
        // dirs, and the shared dir, but not the other NN's dirs. Those have to be
        // done by bootstrapping the standby.
        checkNnPreviousDirExistence(cluster, 0, false);
        checkJnPreviousDirExistence(qjCluster, false);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) Builder(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder) MiniQJMHACluster(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster) URI(java.net.URI) Test(org.junit.Test)

Example 2 with Builder

use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testUpgradeWithJournalNodes.

/**
 * Verifies that an HA NameNode pair configured with JournalNodes can go
 * through a complete upgrade cycle: NN0 is upgraded, restarted normally,
 * the standby is bootstrapped, and a failover to NN1 still serves writes.
 */
@Test
public void testUpgradeWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder qjBuilder = new MiniQJMHACluster.Builder(conf);
        qjBuilder.getDfsBuilder().numDataNodes(0);
        qjCluster = qjBuilder.build();
        MiniDFSCluster dfsCluster = qjCluster.getDfsCluster();
        // Nothing should have been upgraded yet.
        checkJnPreviousDirExistence(qjCluster, false);
        checkClusterPreviousDirExistence(dfsCluster, false);
        assertCTimesEqual(dfsCluster);
        // Make NN0 active and perform a write through the failover proxy.
        dfsCluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(dfsCluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        // Record the committedTxnId held by the journal nodes pre-upgrade.
        final long txIdBeforeUpgrade = getCommittedTxnIdValue(qjCluster);
        // Start the upgrade: stop NN1, then bring NN0 back with the -upgrade flag.
        dfsCluster.shutdownNameNode(1);
        dfsCluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        dfsCluster.restartNameNode(0, false);
        checkNnPreviousDirExistence(dfsCluster, 0, true);
        checkNnPreviousDirExistence(dfsCluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
        assertTrue(txIdBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));
        // With -upgrade, NN0 starts out active, so it can serve writes directly.
        assertTrue(fs.mkdirs(new Path("/foo2")));
        // Restart NN0 once more without -upgrade to confirm a regular start works.
        dfsCluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
        dfsCluster.restartNameNode(0, false);
        // FS operations must still succeed after the upgrade.
        dfsCluster.transitionToActive(0);
        assertTrue(fs.mkdirs(new Path("/foo3")));
        assertTrue(getCommittedTxnIdValue(qjCluster) > txIdBeforeUpgrade);
        // Bootstrap the standby from the upgraded metadata.
        int bootstrapRc = BootstrapStandby.run(new String[] { "-force" }, dfsCluster.getConfiguration(1));
        assertEquals(0, bootstrapRc);
        // Bring NN1 back and verify it can take over as active.
        dfsCluster.restartNameNode(1);
        dfsCluster.transitionToStandby(0);
        dfsCluster.transitionToActive(1);
        assertTrue(fs.mkdirs(new Path("/foo4")));
        assertCTimesEqual(dfsCluster);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) Builder(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder) MiniQJMHACluster(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster) Test(org.junit.Test)

Example 3 with Builder

use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testFinalizeFromSecondNameNodeWithJournalNodes.

/**
 * Make sure that even if the NN which initiated the upgrade is in the standby
 * state, finalizing the upgrade from the other (now active) NN is allowed.
 */
@Test
public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder haBuilder = new MiniQJMHACluster.Builder(conf);
        haBuilder.getDfsBuilder().numDataNodes(0);
        qjCluster = haBuilder.build();
        MiniDFSCluster dfsCluster = qjCluster.getDfsCluster();
        // Nothing should have been upgraded yet.
        checkJnPreviousDirExistence(qjCluster, false);
        checkClusterPreviousDirExistence(dfsCluster, false);
        assertCTimesEqual(dfsCluster);
        // Make NN0 active and perform a write through the failover proxy.
        dfsCluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(dfsCluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        // Start the upgrade: stop NN1, then bring NN0 back with the -upgrade flag.
        dfsCluster.shutdownNameNode(1);
        dfsCluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        dfsCluster.restartNameNode(0, false);
        checkNnPreviousDirExistence(dfsCluster, 0, true);
        checkNnPreviousDirExistence(dfsCluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
        // Bootstrap the standby from the upgraded metadata.
        int exitCode = BootstrapStandby.run(new String[] { "-force" }, dfsCluster.getConfiguration(1));
        assertEquals(0, exitCode);
        dfsCluster.restartNameNode(1);
        // Make the second NN (not the one that initiated the upgrade) active
        // before running the finalize command.
        dfsCluster.transitionToStandby(0);
        dfsCluster.transitionToActive(1);
        runFinalizeCommand(dfsCluster);
        // Finalizing must remove all "previous" dirs on NNs and JNs alike.
        checkClusterPreviousDirExistence(dfsCluster, false);
        checkJnPreviousDirExistence(qjCluster, false);
        assertCTimesEqual(dfsCluster);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) Builder(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder) MiniQJMHACluster(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster) Test(org.junit.Test)

Example 4 with Builder

use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method testFinalizeWithJournalNodes.

/**
 * Verifies that finalizing an HA upgrade backed by JournalNodes removes the
 * "previous" directories everywhere while leaving the committed txid intact.
 */
@Test
public void testFinalizeWithJournalNodes() throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
        Builder haBuilder = new MiniQJMHACluster.Builder(conf);
        haBuilder.getDfsBuilder().numDataNodes(0);
        qjCluster = haBuilder.build();
        MiniDFSCluster dfsCluster = qjCluster.getDfsCluster();
        // Nothing should have been upgraded yet.
        checkJnPreviousDirExistence(qjCluster, false);
        checkClusterPreviousDirExistence(dfsCluster, false);
        assertCTimesEqual(dfsCluster);
        // Make NN0 active and perform a write through the failover proxy.
        dfsCluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(dfsCluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        final long committedIdPreUpgrade = getCommittedTxnIdValue(qjCluster);
        // Start the upgrade: stop NN1, then bring NN0 back with the -upgrade flag.
        dfsCluster.shutdownNameNode(1);
        dfsCluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        dfsCluster.restartNameNode(0, false);
        assertTrue(committedIdPreUpgrade <= getCommittedTxnIdValue(qjCluster));
        assertTrue(fs.mkdirs(new Path("/foo2")));
        checkNnPreviousDirExistence(dfsCluster, 0, true);
        checkNnPreviousDirExistence(dfsCluster, 1, false);
        checkJnPreviousDirExistence(qjCluster, true);
        // Bootstrap the standby from the upgraded metadata.
        int exitCode = BootstrapStandby.run(new String[] { "-force" }, dfsCluster.getConfiguration(1));
        assertEquals(0, exitCode);
        dfsCluster.restartNameNode(1);
        final long committedIdMidUpgrade = getCommittedTxnIdValue(qjCluster);
        assertTrue(committedIdMidUpgrade > committedIdPreUpgrade);
        runFinalizeCommand(dfsCluster);
        // Finalizing must not disturb the committed txid on the journal nodes.
        assertEquals(committedIdMidUpgrade, getCommittedTxnIdValue(qjCluster));
        checkClusterPreviousDirExistence(dfsCluster, false);
        checkJnPreviousDirExistence(qjCluster, false);
        assertCTimesEqual(dfsCluster);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (qjCluster != null) {
            qjCluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) Builder(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder) MiniQJMHACluster(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster) Test(org.junit.Test)

Example 5 with Builder

use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder in project hadoop by apache.

From the class TestFailureToReadEdits, method setUpCluster.

/**
 * Builds the test cluster before each case. Depending on {@code clusterType},
 * either a shared-edits-dir HA cluster (with a randomized base port, retried
 * on BindException) or a quorum-journal HA cluster is started; NN0 is then
 * transitioned to active and a failover FileSystem is configured.
 */
@Before
public void setUpCluster() throws Exception {
    conf = new Configuration();
    // Aggressive checkpoint/tailing intervals so tests exercise edit-log
    // reading quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 10);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    HAUtil.setAllowStandbyReads(conf, true);
    if (clusterType == TestType.SHARED_DIR_HA) {
        int retryCount = 0;
        // NOTE(review): this retry loop has no upper bound; it assumes a free
        // port range will eventually be found.
        while (true) {
            try {
                // Randomize the base port each attempt to dodge collisions with
                // other tests on the same host.
                int basePort = 10000 + RANDOM.nextInt(1000) * 4;
                LOG.info("Set SHARED_DIR_HA cluster's basePort to " + basePort);
                MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(basePort);
                cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).checkExitOnShutdown(false).build();
                break;
            } catch (BindException e) {
                // A port was already taken: tear down any partially-built
                // cluster and try again with a fresh port.
                if (cluster != null) {
                    cluster.shutdown(true);
                    cluster = null;
                }
                ++retryCount;
                LOG.info("SHARED_DIR_HA: MiniQJMHACluster port conflicts, retried " + retryCount + " times " + e);
            }
        }
    } else {
        Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.getDfsBuilder().numDataNodes(0).checkExitOnShutdown(false);
        miniQjmHaCluster = builder.build();
        cluster = miniQjmHaCluster.getDfsCluster();
    }
    cluster.waitActive();
    nn0 = cluster.getNameNode(0);
    nn1 = cluster.getNameNode(1);
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) Builder(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder) MiniDFSNNTopology(org.apache.hadoop.hdfs.MiniDFSNNTopology) BindException(java.net.BindException) Before(org.junit.Before)

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)5 Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder)5 FileSystem (org.apache.hadoop.fs.FileSystem)4 Path (org.apache.hadoop.fs.Path)4 MiniQJMHACluster (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster)4 Test (org.junit.Test)4 BindException (java.net.BindException)1 URI (java.net.URI)1 Configuration (org.apache.hadoop.conf.Configuration)1 MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology)1 Before (org.junit.Before)1