
Example 26 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From the class TestDFSUpgradeWithHA, method testCannotUpgradeSecondNameNode.

/**
   * Make sure that starting a second NN with the -upgrade flag fails if the
   * other NN has already done that.
   */
@Test
public void testCannotUpgradeSecondNameNode() throws IOException, URISyntaxException {
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf)
                .nnTopology(MiniDFSNNTopology.simpleHATopology())
                .numDataNodes(0)
                .build();
        File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
        // No upgrade is in progress at the moment.
        checkClusterPreviousDirExistence(cluster, false);
        assertCTimesEqual(cluster);
        checkPreviousDirExistence(sharedDir, false);
        // Transition NN0 to active and do some FS ops.
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.shutdownNameNode(1);
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        cluster.restartNameNode(0, false);
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, false);
        checkPreviousDirExistence(sharedDir, true);
        // NN0 should come up in the active state when given the -upgrade option,
        // so no need to transition it to active.
        assertTrue(fs.mkdirs(new Path("/foo2")));
        // Restart NN0 without the -upgrade flag, to make sure that works.
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
        cluster.restartNameNode(0, false);
        // Make sure we can still do FS ops after upgrading.
        cluster.transitionToActive(0);
        assertTrue(fs.mkdirs(new Path("/foo3")));
        // Make sure that starting the second NN with the -upgrade flag fails.
        cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE);
        try {
            cluster.restartNameNode(1, false);
            fail("Should not have been able to start second NN with -upgrade");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("It looks like the shared log is already being upgraded", ioe);
        }
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder), IOException (java.io.IOException), BestEffortLongFile (org.apache.hadoop.hdfs.util.BestEffortLongFile), PersistentLongFile (org.apache.hadoop.hdfs.util.PersistentLongFile), File (java.io.File), Test (org.junit.Test).
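
The checkClusterPreviousDirExistence, checkNnPreviousDirExistence, and checkPreviousDirExistence calls above are private helpers of TestDFSUpgradeWithHA that are not shown in this snippet. A minimal sketch of the directory check, assuming HDFS's convention of creating a "previous" directory inside a storage directory once an upgrade begins (the body below is illustrative, not the Hadoop implementation):

import java.io.File;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

// Illustrative sketch: an in-progress upgrade leaves a "previous"
// directory under the storage directory being upgraded.
private static void checkPreviousDirExistence(File rootDir, boolean shouldExist) {
    File previousDir = new File(rootDir, "previous");
    if (shouldExist) {
        assertTrue(previousDir + " should exist", previousDir.exists());
    } else {
        assertFalse(previousDir + " should not exist", previousDir.exists());
    }
}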

Example 27 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From the class TestDFSUpgradeWithHA, method testCannotFinalizeIfNoActive.

/**
   * Ensure that an admin cannot finalize an HA upgrade without at least one NN
   * being active.
   */
@Test
public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException {
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf)
                .nnTopology(MiniDFSNNTopology.simpleHATopology())
                .numDataNodes(0)
                .build();
        File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
        // No upgrade is in progress at the moment.
        checkClusterPreviousDirExistence(cluster, false);
        assertCTimesEqual(cluster);
        checkPreviousDirExistence(sharedDir, false);
        // Transition NN0 to active and do some FS ops.
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/foo1")));
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.shutdownNameNode(1);
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
        cluster.restartNameNode(0, false);
        checkNnPreviousDirExistence(cluster, 0, true);
        checkNnPreviousDirExistence(cluster, 1, false);
        checkPreviousDirExistence(sharedDir, true);
        // NN0 should come up in the active state when given the -upgrade option,
        // so no need to transition it to active.
        assertTrue(fs.mkdirs(new Path("/foo2")));
        // Restart NN0 without the -upgrade flag, to make sure that works.
        cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
        cluster.restartNameNode(0, false);
        // Make sure we can still do FS ops after upgrading.
        cluster.transitionToActive(0);
        assertTrue(fs.mkdirs(new Path("/foo3")));
        // Now bootstrap the standby with the upgraded info.
        int rc = BootstrapStandby.run(new String[] { "-force" }, cluster.getConfiguration(1));
        assertEquals(0, rc);
        // Now restart NN1 and make sure that we can do ops against that as well.
        cluster.restartNameNode(1);
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        assertTrue(fs.mkdirs(new Path("/foo4")));
        assertCTimesEqual(cluster);
        // Now there's no active NN.
        cluster.transitionToStandby(1);
        try {
            runFinalizeCommand(cluster);
            fail("Should not have been able to finalize upgrade with no NN active");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active", ioe);
        }
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), Builder (org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder), IOException (java.io.IOException), BestEffortLongFile (org.apache.hadoop.hdfs.util.BestEffortLongFile), PersistentLongFile (org.apache.hadoop.hdfs.util.PersistentLongFile), File (java.io.File), Test (org.junit.Test).
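
runFinalizeCommand is another private helper of TestDFSUpgradeWithHA not shown above. A plausible sketch, assuming it finalizes through the DFSAdmin tool (the same code path as "hdfs dfsadmin -finalizeUpgrade"); the exact body is an assumption, and conf is the test class's configuration field:

import java.io.IOException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

// Sketch only: point the client configuration at the HA pair, then
// finalize the upgrade through DFSAdmin.
private void runFinalizeCommand(MiniDFSCluster cluster) throws IOException {
    HATestUtil.setFailoverConfigurations(cluster, conf);
    new DFSAdmin(conf).finalizeUpgrade();
}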

Example 28 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From the class TestHAFsck, method testHaFsck.

/**
   * Test that fsck still works with HA enabled.
   */
@Test
public void testHaFsck() throws Exception {
    Configuration conf = new Configuration();
    // The two NameNodes need distinct HTTP ports.
    MiniDFSNNTopology topology = new MiniDFSNNTopology()
            .addNameservice(new MiniDFSNNTopology.NSConf("ha-nn-uri-0")
                    .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10051))
                    .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10052)));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(topology)
            .numDataNodes(0)
            .build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        // Make sure conf has the relevant HA configs.
        HATestUtil.setFailoverConfigurations(cluster, conf, "ha-nn-uri-0", 0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        fs.mkdirs(new Path("/test1"));
        fs.mkdirs(new Path("/test2"));
        runFsck(conf);
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        runFsck(conf);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), Test (org.junit.Test).
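
runFsck is a helper defined elsewhere in TestHAFsck. A minimal sketch of how fsck can be driven in-process, assuming the standard DFSck tool run through ToolRunner (the flags and assertion are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

import static org.junit.Assert.assertEquals;

// Sketch only: run "fsck / -files" in-process and assert a clean exit.
static void runFsck(Configuration conf) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bytes, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), new String[] { "/", "-files" });
    assertEquals("fsck should return 0 on a healthy namespace", 0, errCode);
}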

Example 29 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From the class TestHAMetrics, method testHAInodeCount.

@Test
public void testHAInodeCount() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(1)
            .build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        FSNamesystem nn0 = cluster.getNamesystem(0);
        FSNamesystem nn1 = cluster.getNamesystem(1);
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount1"), 10, (short) 1, 1L);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount2"), 10, (short) 1, 1L);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount3"), 10, (short) 1, 1L);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount4"), 10, (short) 1, 1L);
        // 1 dir and 4 files
        assertEquals(5, nn0.getFilesTotal());
        // The SBN still has one dir, which is "/".
        assertEquals(1, nn1.getFilesTotal());
        // Save the fsimage so that the NN loads the namespace from the
        // image on restart instead of rebuilding it by replaying edits.
        ((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        ((DistributedFileSystem) fs).saveNamespace();
        // Flip the two namenodes and restart the standby, which will load
        // the fsimage.
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        cluster.restartNameNode(0);
        assertEquals("standby", nn0.getHAState());
        // The restarted standby should report the correct count
        nn0 = cluster.getNamesystem(0);
        assertEquals(5, nn0.getFilesTotal());
    } finally {
        IOUtils.cleanup(LOG, fs);
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test).
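
The safe-mode-plus-saveNamespace step is what forces a fresh fsimage, which is why the restarted NameNode reports the correct inode count without replaying edits. The same pattern, distilled into a hypothetical helper built only from calls that appear in the snippet above (assuming the HdfsConstants.SafeModeAction enum used by DistributedFileSystem.setSafeMode):

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Hypothetical helper: checkpoint the active namespace, flip the HA
// roles, and restart NN0 so it boots from the fresh fsimage.
static void checkpointAndRestart(MiniDFSCluster cluster, DistributedFileSystem dfs) throws Exception {
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); // freeze the namespace
    dfs.saveNamespace();                            // persist it as a new fsimage
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.restartNameNode(0);                     // reloads from the image; the manually
                                                    // entered safe mode does not survive restart
}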

Example 30 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From the class TestHAMetrics, method testHAMetrics.

@Test(timeout = 300000)
public void testHAMetrics() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(1)
            .build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        FSNamesystem nn0 = cluster.getNamesystem(0);
        FSNamesystem nn1 = cluster.getNamesystem(1);
        assertEquals("standby", nn0.getHAState());
        assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
        assertEquals("standby", nn1.getHAState());
        assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
        cluster.transitionToActive(0);
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        final ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");
        final Long ltt1 = (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
        assertTrue("lastHATransitionTime should be > 0", ltt1 > 0);
        assertEquals("active", nn0.getHAState());
        assertEquals(0, nn0.getMillisSinceLastLoadedEdits());
        assertEquals("standby", nn1.getHAState());
        assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
        cluster.transitionToStandby(0);
        final Long ltt2 = (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
        assertTrue("lastHATransitionTime should be > " + ltt1, ltt2 > ltt1);
        cluster.transitionToActive(1);
        assertEquals("standby", nn0.getHAState());
        assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
        assertEquals("active", nn1.getHAState());
        assertEquals(0, nn1.getMillisSinceLastLoadedEdits());
        // make sure standby gets a little out-of-date
        Thread.sleep(2000);
        assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());
        assertEquals(0, nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        DFSTestUtil.createFile(fs, new Path("/foo"), 10, (short) 1, 1L);
        assertTrue(0 < nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());
        long millisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
        HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1), cluster.getNameNode(0));
        assertEquals(0, nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());
        long newMillisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
        // Since we just waited for the standby to catch up, the time since we
        // last loaded edits should be very low.
        assertTrue("expected " + millisSinceLastLoadedEdits + " > " + newMillisSinceLastLoadedEdits, millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
    } finally {
        IOUtils.cleanup(LOG, fs);
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), MBeanServer (javax.management.MBeanServer), ObjectName (javax.management.ObjectName), Test (org.junit.Test).
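
Beyond LastHATransitionTime, the NameNodeStatus MXBean also publishes the node's role and HA state, giving an external view of what nn0.getHAState() reports internally. A short sketch, assuming the "State" and "NNRole" attribute names exposed by NameNodeStatusMXBean (treat these names as assumptions):

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

// Sketch: read the HA state over JMX rather than through FSNamesystem.
static void printJmxHAState() throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName nnStatus = new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");
    String state = (String) mbs.getAttribute(nnStatus, "State");  // "active" or "standby"
    String role = (String) mbs.getAttribute(nnStatus, "NNRole");  // e.g. "NameNode"
    System.out.println(role + " is currently " + state);
}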

Aggregations

Classes most frequently used alongside MiniDFSCluster across the indexed examples, with occurrence counts:

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507
Test (org.junit.Test): 429
Configuration (org.apache.hadoop.conf.Configuration): 403
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312
Path (org.apache.hadoop.fs.Path): 290
FileSystem (org.apache.hadoop.fs.FileSystem): 211
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183
IOException (java.io.IOException): 107
File (java.io.File): 83
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35
RandomAccessFile (java.io.RandomAccessFile): 33
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
URI (java.net.URI): 31
ArrayList (java.util.ArrayList): 29
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24