
Example 71 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestListCorruptFileBlocks method testlistCorruptFileBlocksDFS.

/**
   * test listCorruptFileBlocks in DistributedFileSystem
   */
@Test(timeout = 300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // datanode scans directories
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    FileSystem fs = null;
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
        util.createFiles(fs, "/corruptData");
        RemoteIterator<Path> corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
        int numCorrupt = countPaths(corruptFileBlocks);
        assertTrue(numCorrupt == 0);
        // delete the blocks
        String bpid = cluster.getNamesystem().getBlockPoolId();
        // Loop through the data directories per datanode (2)
        for (int i = 0; i < 2; i++) {
            File storageDir = cluster.getInstanceStorageDir(0, i);
            File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
            if (metadataFiles == null)
                continue;
            for (File metadataFile : metadataFiles) {
                File blockFile = Block.metaToBlockFile(metadataFile);
                LOG.info("Deliberately removing file " + blockFile.getName());
                assertTrue("Cannot remove file.", blockFile.delete());
                LOG.info("Deliberately removing file " + metadataFile.getName());
                assertTrue("Cannot remove file.", metadataFile.delete());
            // break;
            }
        }
        int count = 0;
        corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
        numCorrupt = countPaths(corruptFileBlocks);
        while (numCorrupt < 3) {
            Thread.sleep(1000);
            corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
            numCorrupt = countPaths(corruptFileBlocks);
            count++;
            if (count > 30)
                break;
        }
        // Validate we get all the corrupt files
        LOG.info("Namenode has bad files. " + numCorrupt);
        assertTrue(numCorrupt == 3);
        util.cleanup(fs, "/corruptData");
        util.cleanup(fs, "/goodData");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), Test (org.junit.Test)
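
The zero-corruption check and the final assertion above both go through a countPaths helper that is not part of the excerpt. A minimal sketch of such a helper, assuming it does nothing more than drain the RemoteIterator returned by listCorruptFileBlocks and count (and log) the entries:

// Hedged sketch of the countPaths helper referenced above. It assumes only the
// test class's LOG plus org.apache.hadoop.fs.RemoteIterator,
// org.apache.hadoop.fs.Path and java.io.IOException.
private int countPaths(RemoteIterator<Path> iter) throws IOException {
    int count = 0;
    while (iter.hasNext()) {
        LOG.info("PATH: " + iter.next().toUri().getPath());
        count++;
    }
    return count;
}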

Example 72 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestListCorruptFileBlocks method testMaxCorruptFiles.

/**
   * Test if NN.listCorruptFiles() returns the right number of results.
   * The corrupt blocks are detected by the BlockPoolSliceScanner.
   * Also, test that DFS.listCorruptFileBlocks can make multiple successive
   * calls.
   */
@Test(timeout = 300000)
public void testMaxCorruptFiles() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        // datanode sends block reports
        conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
        cluster = new MiniDFSCluster.Builder(conf).build();
        FileSystem fs = cluster.getFileSystem();
        final int maxCorruptFileBlocks = FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;
        // create 3 * maxCorruptFileBlocks files, each with a single block
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).build();
        util.createFiles(fs, "/srcdat2", (short) 1);
        util.waitReplication(fs, "/srcdat2", (short) 1);
        // verify that there are no bad blocks.
        final NameNode namenode = cluster.getNameNode();
        Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.", badFiles.size() == 0);
        // Now deliberately remove blocks from all files
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j <= 1; j++) {
                File storageDir = cluster.getInstanceStorageDir(i, j);
                File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
                LOG.info("Removing files from " + data_dir);
                List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
                if (metadataFiles == null)
                    continue;
                for (File metadataFile : metadataFiles) {
                    File blockFile = Block.metaToBlockFile(metadataFile);
                    assertTrue("Cannot remove file.", blockFile.delete());
                    assertTrue("Cannot remove file.", metadataFile.delete());
                }
            }
        }
        // Run the directory scanner to update the DataNode's volumeMap
        DataNode dn = cluster.getDataNodes().get(0);
        DataNodeTestUtils.runDirectoryScanner(dn);
        // Occasionally the BlockPoolSliceScanner can run before we have removed
        // the blocks. Restart the Datanode to trigger the scanner into running
        // once more.
        LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
        cluster.restartDataNodes();
        cluster.waitActive();
        badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        while (badFiles.size() < maxCorruptFileBlocks) {
            LOG.info("# of corrupt files is: " + badFiles.size());
            Thread.sleep(10000);
            badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        }
        badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " + maxCorruptFileBlocks + ".", badFiles.size() == maxCorruptFileBlocks);
        CorruptFileBlockIterator iter = (CorruptFileBlockIterator) fs.listCorruptFileBlocks(new Path("/srcdat2"));
        int corruptPaths = countPaths(iter);
        assertTrue("Expected more than " + maxCorruptFileBlocks + " corrupt file blocks but got " + corruptPaths, corruptPaths > maxCorruptFileBlocks);
        assertTrue("Iterator should have made more than 1 call but made " + iter.getCallsMade(), iter.getCallsMade() > 1);
        util.cleanup(fs, "/srcdat2");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), CorruptFileBlockIterator (org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), Test (org.junit.Test)
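
The Thread.sleep polling loops in examples 71 and 72 can also be expressed with GenericTestUtils.waitFor (org.apache.hadoop.test.GenericTestUtils), the same helper example 74 below relies on. A hedged sketch of the wait in testMaxCorruptFiles written that way; namenode and maxCorruptFileBlocks are final in the surrounding test so they can be captured, the 60-second timeout is an assumption, and Supplier is whichever type this Hadoop version's waitFor expects:

// Alternative to the manual sleep loop above: poll once per second, give up
// after 60 seconds. The check catches Exception so a transient failure of
// listCorruptFileBlocks just means "not done yet".
GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        try {
            return namenode.getNamesystem()
                .listCorruptFileBlocks("/srcdat2", null).size() >= maxCorruptFileBlocks;
        } catch (Exception e) {
            return false;
        }
    }
}, 1000, 60000);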

Example 73 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestNameEditsConfigs method testNameEditsConfigs.

/**
   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
   * The test creates files and restarts cluster with different configs.
   * 1. Starts cluster with shared name and edits dirs
   * 2. Restarts cluster by adding additional (different) name and edits dirs
   * 3. Restarts cluster by removing the shared name and edits dirs and
   *    starting with separate name and edits dirs
   * 4. Restarts cluster by adding the shared directory again, but makes sure we
   *    do not read any stale image or edits.
   * All along the test, we create and delete files at each restart to make
   * sure we are reading proper edits and image.
   * @throws Exception 
   */
@Test
public void testNameEditsConfigs() throws Exception {
    Path file1 = new Path("TestNameEditsConfigs1");
    Path file2 = new Path("TestNameEditsConfigs2");
    Path file3 = new Path("TestNameEditsConfigs3");
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    Configuration conf = null;
    FileSystem fileSys = null;
    final File newNameDir = new File(base_dir, "name");
    final File newEditsDir = new File(base_dir, "edits");
    final File nameAndEdits = new File(base_dir, "name_and_edits");
    final File checkpointNameDir = new File(base_dir, "secondname");
    final File checkpointEditsDir = new File(base_dir, "secondedits");
    final File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
    ImmutableList<File> allCurrentDirs = ImmutableList.of(new File(nameAndEdits, "current"), new File(newNameDir, "current"), new File(newEditsDir, "current"), new File(checkpointNameAndEdits, "current"), new File(checkpointNameDir, "current"), new File(checkpointEditsDir, "current"));
    ImmutableList<File> imageCurrentDirs = ImmutableList.of(new File(nameAndEdits, "current"), new File(newNameDir, "current"), new File(checkpointNameAndEdits, "current"), new File(checkpointNameDir, "current"));
    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    // Manage our own dfs directories
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file1, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    assertTrue(newNameDir.mkdir());
    assertTrue(newEditsDir.mkdir());
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() + "," + newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() + "," + newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    // Manage our own dfs directories. Do not format.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(fileSys.exists(file1));
        checkFile(fileSys, file1, replication);
        cleanupFile(fileSys, file1);
        DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file2, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.of("VERSION"));
    FSImageTestUtil.assertSameNewestImage(imageCurrentDirs);
    // Now remove the shared name_and_edits directory and start the namenode
    // with separate name and edits dirs
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        assertTrue(fileSys.exists(file2));
        checkFile(fileSys, file2, replication);
        cleanupFile(fileSys, file2);
        DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file3, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    // No edit logs in new name dir
    checkImageAndEditsFilesExistence(newNameDir, true, false);
    checkImageAndEditsFilesExistence(newEditsDir, false, true);
    checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
    checkImageAndEditsFilesExistence(checkpointEditsDir, false, true);
    // Add old name_and_edits dir. File system should not read image or edits
    // from old dir
    assertTrue(FileUtil.fullyDelete(new File(nameAndEdits, "current")));
    assertTrue(FileUtil.fullyDelete(new File(checkpointNameAndEdits, "current")));
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() + "," + newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits + "," + newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        assertTrue(!fileSys.exists(file2));
        assertTrue(fileSys.exists(file3));
        checkFile(fileSys, file3, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    checkImageAndEditsFilesExistence(nameAndEdits, true, true);
    checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), File (java.io.File), Test (org.junit.Test)
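
Example 73 calls a startSecondaryNameNode helper that is not included in the snippet. A plausible sketch, assuming the helper only has to point the secondary's HTTP server at an ephemeral port before constructing it; the actual helper in TestNameEditsConfigs may differ:

// Hedged sketch of the startSecondaryNameNode helper used above.
private SecondaryNameNode startSecondaryNameNode(Configuration conf)
        throws IOException {
    // Bind the secondary NameNode's HTTP server to an ephemeral port for the test.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    return new SecondaryNameNode(conf);
}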

Example 74 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestNameNodeMXBean method testDecommissioningNodes.

@Test(timeout = 120000)
public void testDecommissioningNodes() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30);
    MiniDFSCluster cluster = null;
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNameNode().namesystem;
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        List<String> hosts = new ArrayList<>();
        for (DataNode dn : cluster.getDataNodes()) {
            hosts.add(dn.getDisplayName());
        }
        hostsFileWriter.initIncludeHosts(hosts.toArray(new String[hosts.size()]));
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        // 1. Verify Live nodes
        String liveNodesInfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        Map<String, Map<String, Object>> liveNodes = (Map<String, Map<String, Object>>) JSON.parse(liveNodesInfo);
        assertEquals(fsn.getLiveNodes(), liveNodesInfo);
        assertEquals(fsn.getNumLiveDataNodes(), liveNodes.size());
        for (Map<String, Object> liveNode : liveNodes.values()) {
            assertTrue(liveNode.containsKey("lastContact"));
            assertTrue(liveNode.containsKey("xferaddr"));
        }
        // Add the 1st DataNode to Decommission list
        hostsFileWriter.initExcludeHost(cluster.getDataNodes().get(0).getDisplayName());
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        // Wait for the DecommissionManager to finish refreshing nodes
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                try {
                    String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
                    Map<String, Map<String, Object>> decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
                    if (decomNodes.size() > 0) {
                        return true;
                    }
                } catch (Exception e) {
                    return false;
                }
                return false;
            }
        }, 1000, 60000);
        // 2. Verify Decommission InProgress nodes
        String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
        Map<String, Map<String, Object>> decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
        assertEquals(fsn.getDecomNodes(), decomNodesInfo);
        assertEquals(fsn.getNumDecommissioningDataNodes(), decomNodes.size());
        assertEquals(0, fsn.getNumDecomLiveDataNodes());
        assertEquals(0, fsn.getNumDecomDeadDataNodes());
        // Wait for the DecommissionManager to finish its check
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                if (fsn.getNumDecomLiveDataNodes() == 1) {
                    return true;
                }
                return false;
            }
        }, 1000, 60000);
        // 3. Verify Decommissioned nodes
        decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
        decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
        assertEquals(0, decomNodes.size());
        assertEquals(fsn.getDecomNodes(), decomNodesInfo);
        assertEquals(1, fsn.getNumDecomLiveDataNodes());
        assertEquals(0, fsn.getNumDecomDeadDataNodes());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), ArrayList (java.util.ArrayList), BindException (java.net.BindException), IOException (java.io.IOException), ObjectName (javax.management.ObjectName), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Map (java.util.Map), HashMap (java.util.HashMap), MBeanServer (javax.management.MBeanServer), Test (org.junit.Test)
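
On a Java 8+ toolchain the anonymous Supplier blocks in example 74 collapse to lambdas, since the Supplier type GenericTestUtils.waitFor accepts has a single abstract method. A hedged sketch of both waits written that way; fsn, mbs and mxbeanName are effectively final in the test body, so they can be captured:

// First wait: at least one entry shows up in the DecomNodes MXBean attribute.
GenericTestUtils.waitFor(() -> {
    try {
        String info = (String) mbs.getAttribute(mxbeanName, "DecomNodes");
        return ((Map<?, ?>) JSON.parse(info)).size() > 0;
    } catch (Exception e) {
        return false;
    }
}, 1000, 60000);

// Second wait: the node has finished decommissioning while still alive.
GenericTestUtils.waitFor(() -> fsn.getNumDecomLiveDataNodes() == 1, 1000, 60000);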

Example 75 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestNameNodeMXBean method testNNDirectorySize.

@Test(timeout = 120000)
public void testNNDirectorySize() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    MiniDFSCluster cluster = null;
    for (int i = 0; i < 5; i++) {
        try {
            // Have to specify IPC ports so the NNs can talk to each other.
            int[] ports = ServerSocketUtil.getPorts(2);
            MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0])).addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1])));
            cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
            break;
        } catch (BindException e) {
            // retry if race on ports given by ServerSocketUtil#getPorts
            continue;
        }
    }
    if (cluster == null) {
        fail("failed to start mini cluster.");
    }
    FileSystem fs = null;
    try {
        cluster.waitActive();
        FSNamesystem nn0 = cluster.getNamesystem(0);
        FSNamesystem nn1 = cluster.getNamesystem(1);
        checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
        checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
        cluster.transitionToActive(0);
        fs = cluster.getFileSystem(0);
        DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
        // rollEditLog: waitForStandbyToCatchUp rolls the active NN's edit log
        HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0), cluster.getNameNode(1));
        checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
        checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
        // Test the metric after calling saveNamespace
        DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
        nn0.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        nn0.saveNamespace(0, 0);
        checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), BindException (java.net.BindException), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), Test (org.junit.Test)
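
Example 75 relies on a checkNNDirSize helper that is not shown. FSNamesystem#getNameDirSize exposes the NameDirSize metric as a JSON string mapping each name directory to its on-disk size, so a plausible check parses that string and compares it against the directories the cluster reports; the real helper in TestNameNodeMXBean may differ, and the JSON parser is assumed to be the same one already used in example 74:

// Hedged sketch of a checkNNDirSize helper: parse the JSON metric and verify
// every configured name directory appears in it.
private void checkNNDirSize(Collection<URI> nameDirUris, String metric) {
    @SuppressWarnings("unchecked")
    Map<String, Long> nnDirMap = (Map<String, Long>) JSON.parse(metric);
    assertEquals(nameDirUris.size(), nnDirMap.size());
    for (URI nameDirUri : nameDirUris) {
        assertTrue(nnDirMap.containsKey(nameDirUri.getPath()));
    }
}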

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507 uses
Test (org.junit.Test): 429 uses
Configuration (org.apache.hadoop.conf.Configuration): 403 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312 uses
Path (org.apache.hadoop.fs.Path): 290 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 211 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183 uses
IOException (java.io.IOException): 107 uses
File (java.io.File): 83 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35 uses
RandomAccessFile (java.io.RandomAccessFile): 33 uses
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33 uses
URI (java.net.URI): 31 uses
ArrayList (java.util.ArrayList): 29 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25 uses
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24 uses