Search in sources :

Example 76 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

The class TestNNStorageRetentionFunctional, method testPurgingWithNameEditsDirAfterFailure.

/**
  * Test case where two directories are configured as NAME_AND_EDITS
  * and one of them fails to save storage. Since the edits and image
  * failure states are decoupled, the failure of image saving should
  * not prevent the purging of logs from that dir.
  *
  * Each call to {@code doSaveNamespace} advances the txid by 2 (one
  * finalized segment plus a new in-progress segment), which is why the
  * expected image/edits file names step 0 -> 2 -> 4 -> 6 -> 8 below.
  */
@Test
public void testPurgingWithNameEditsDirAfterFailure() throws Exception {
    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();
    // Retain no extra edit logs beyond what the kept images require, so
    // purging is observable immediately after each saveNamespace.
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
    File sd0 = new File(TEST_ROOT_DIR, "nn0");
    File sd1 = new File(TEST_ROOT_DIR, "nn1");
    // "current" subdirectories where images and edit logs actually land.
    File cd0 = new File(sd0, "current");
    File cd1 = new File(sd1, "current");
    // Both directories serve as NAME_AND_EDITS (the default role for
    // entries in DFS_NAMENODE_NAME_DIR_KEY).
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(sd0, sd1));
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).format(true).build();
        NameNode nn = cluster.getNameNode();
        doSaveNamespace(nn);
        LOG.info("After first save, images 0 and 2 should exist in both dirs");
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(0), getImageFileName(2));
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(0), getImageFileName(2));
        assertGlobEquals(cd0, "edits_.*", getFinalizedEditsFileName(1, 2), getInProgressEditsFileName(3));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(1, 2), getInProgressEditsFileName(3));
        doSaveNamespace(nn);
        LOG.info("After second save, image 0 should be purged, " + "and image 4 should exist in both.");
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd0, "edits_.*", getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
        LOG.info("Failing first storage dir by chmodding it");
        // Make cd0 unwritable so the image save into it fails; the save as
        // a whole still succeeds via cd1. NOTE(review): chmod 000 does not
        // block access when the test runs as root — assumes a non-root user.
        assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "000"));
        doSaveNamespace(nn);
        LOG.info("Restoring accessibility of first storage dir");
        assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "755"));
        LOG.info("nothing should have been purged in first storage dir");
        // cd0 was inaccessible during the save, so its contents are frozen
        // at the post-second-save state.
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd0, "edits_.*", getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
        LOG.info("fsimage_2 should be purged in second storage dir");
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(4), getImageFileName(6));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(5, 6), getInProgressEditsFileName(7));
        LOG.info("On next save, we should purge logs from the failed dir," + " but not images, since the image directory is in failed state.");
        doSaveNamespace(nn);
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(6), getImageFileName(8));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(7, 8), getInProgressEditsFileName(9));
        // Key assertion of the test: images in cd0 are untouched (image dir
        // is marked failed), but old edit logs in cd0 WERE purged — only
        // the fresh in-progress segment remains.
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd0, "edits_.*", getInProgressEditsFileName(9));
    } finally {
        // Restore permissions even on failure so later cleanup can delete cd0.
        FileUtil.chmod(cd0.getAbsolutePath(), "755");
        LOG.info("Shutting down...");
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) File(java.io.File) Test(org.junit.Test)

Example 77 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

The class TestNNThroughputBenchmark, method testNNThroughputAgainstRemoteNN.

/**
   * Runs {@link NNThroughputBenchmark} with {@code -op all} against the
   * NameNode of a {@link MiniDFSCluster}, exercising the remote-NN path.
   */
@Test(timeout = 120000)
public void testNNThroughputAgainstRemoteNN() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
    MiniDFSCluster dfsCluster = null;
    try {
        dfsCluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(0)
            .build();
        dfsCluster.waitActive();
        // Point the benchmark at the live cluster through its default FS URI.
        final Configuration benchmarkConf = new HdfsConfiguration();
        FileSystem.setDefaultUri(benchmarkConf, dfsCluster.getURI());
        final String[] benchmarkArgs = { "-op", "all" };
        NNThroughputBenchmark.runBenchmark(benchmarkConf, benchmarkArgs);
    } finally {
        if (dfsCluster != null) {
            dfsCluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)

Example 78 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

The class TestGenericJournalConf, method testDummyJournalManager.

/**
   * Verifies that a pluggable {@link JournalManager} implementation
   * (DummyJournalManager, registered under the "dummy" URI scheme) is
   * instantiated and initialized during NameNode startup.
   */
@Test
public void testDummyJournalManager() throws Exception {
    MiniDFSCluster dfsCluster = null;
    Configuration conf = new Configuration();
    // Register the dummy implementation for the "dummy" scheme and use it
    // as the (only) edits directory.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
        DummyJournalManager.class.getName());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, DUMMY_URI);
    // The dummy journal is not a real volume; don't require any healthy ones.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
    try {
        dfsCluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(0)
            .build();
        dfsCluster.waitActive();
        // Startup must have driven the full plugin lifecycle.
        assertTrue(DummyJournalManager.shouldPromptCalled);
        assertTrue(DummyJournalManager.formatCalled);
        assertNotNull(DummyJournalManager.conf);
        assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
        assertNotNull(DummyJournalManager.nsInfo);
        String clusterId = dfsCluster.getNameNode().getNamesystem().getClusterId();
        assertEquals(DummyJournalManager.nsInfo.getClusterID(), clusterId);
    } finally {
        if (dfsCluster != null) {
            dfsCluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) URI(java.net.URI) Test(org.junit.Test)

Example 79 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

The class TestGenericJournalConf, method testNotConfigured.

/**
   * Verifies that NameNode startup throws {@link IllegalArgumentException}
   * when an edits URI uses a scheme ("dummy") with no journal class
   * configured for it.
   */
@Test(expected = IllegalArgumentException.class)
public void testNotConfigured() throws Exception {
    MiniDFSCluster dfsCluster = null;
    Configuration conf = new Configuration();
    // "dummy" scheme deliberately has no plugin registered.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "dummy://test");
    try {
        // Expected to throw before the cluster becomes active.
        dfsCluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(0)
            .build();
        dfsCluster.waitActive();
    } finally {
        if (dfsCluster != null) {
            dfsCluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) Test(org.junit.Test)

Example 80 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

The class TestGenericJournalConf, method testBadConstructor.

/**
   * Verifies that a JournalManager implementation lacking the required
   * (Configuration, URI) constructor is rejected at startup with an
   * "Unable to construct journal" error.
   */
@Test
public void testBadConstructor() throws Exception {
    MiniDFSCluster dfsCluster = null;
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
        BadConstructorJournalManager.class.getName());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "dummy://test");
    try {
        dfsCluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(0)
            .build();
        dfsCluster.waitActive();
        // Reaching this point means startup did not reject the plugin.
        fail("Should have failed before this point");
    } catch (IllegalArgumentException iae) {
        // Expected path — but make sure it failed for the right reason.
        boolean rightCause = iae.getMessage().contains("Unable to construct journal");
        if (!rightCause) {
            fail("Should have failed with unable to construct exception");
        }
    } finally {
        if (dfsCluster != null) {
            dfsCluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) Test(org.junit.Test)

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)507 Test (org.junit.Test)429 Configuration (org.apache.hadoop.conf.Configuration)403 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)312 Path (org.apache.hadoop.fs.Path)290 FileSystem (org.apache.hadoop.fs.FileSystem)211 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)183 IOException (java.io.IOException)107 File (java.io.File)83 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)64 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)53 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)35 RandomAccessFile (java.io.RandomAccessFile)33 MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)33 URI (java.net.URI)31 ArrayList (java.util.ArrayList)29 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)28 FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)26 FsPermission (org.apache.hadoop.fs.permission.FsPermission)25 HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)24