Example 1 with BlockCollection

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection in the Apache Hadoop project.

From the class TestFsck, method testBlockIdCKDecommission:

/**
   * Test for blockIdCK with datanode decommission.
   */
@Test
public void testBlockIdCKDecommission() throws Exception {
    final short replFactor = 1;
    short numDn = 2;
    final long blockSize = 512;
    boolean checkDecommissionInProgress = false;
    String[] racks = { "/rack1", "/rack2" };
    String[] hosts = { "host1", "host2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // create the test file
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName()).append(' ');
    }
    String[] bIds = sb.toString().split(" ");
    // make sure the datanode that holds the replica is healthy before decommission
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // decommission the datanode
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    ExtendedBlock eb = util.getFirstBlock(dfs, path);
    BlockCollection bc = null;
    // acquire the write lock before the try block, so the finally never unlocks an unheld lock
    fsn.writeLock();
    try {
        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
        bc = fsn.getBlockCollection(bi);
    } finally {
        fsn.writeUnlock();
    }
    DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
    bm.getDatanodeManager().getDecomManager().startDecommission(dn);
    String dnName = dn.getXferAddr();
    // wait for decommission to start
    DatanodeInfo datanodeInfo = null;
    do {
        Thread.sleep(2000);
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr())) {
                datanodeInfo = info;
            }
        }
        // while decommission is in progress, fsck -blockId should report DECOMMISSIONING (exit code 3); assert this only once
        if (!checkDecommissionInProgress && datanodeInfo != null && datanodeInfo.isDecommissionInProgress()) {
            String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
            assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
            checkDecommissionInProgress = true;
        }
    } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
    // once decommission completes, fsck -blockId should report DECOMMISSIONED (exit code 2)
    String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
    assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
}
Also used : Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockCollection (org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Test (org.junit.Test)
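
Both examples resolve the BlockCollection the same way: map the file's first ExtendedBlock to the namenode's stored BlockInfo under the FSNamesystem write lock, then ask the namesystem for the owning collection. A minimal sketch of that shared pattern as a standalone helper follows; the helper name lookupBlockCollection is ours for illustration, while the FSNamesystem and BlockManager calls are exactly the ones the tests above use:

private static BlockCollection lookupBlockCollection(FSNamesystem fsn, ExtendedBlock eb) {
    BlockManager bm = fsn.getBlockManager();
    // take the write lock before entering the try block, so the finally
    // clause never unlocks a lock that was not acquired
    fsn.writeLock();
    try {
        // map the on-wire block to the namenode's stored BlockInfo, which
        // links back to the BlockCollection (the file) that owns it
        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
        return fsn.getBlockCollection(bi);
    } finally {
        fsn.writeUnlock();
    }
}

From the returned collection the tests then pick the first replica's DatanodeDescriptor via bc.getBlocks()[0].getDatanode(0) and hand it to the DecommissionManager.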

Example 2 with BlockCollection

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection in the Apache Hadoop project.

From the class TestFsck, method testFsckWithDecommissionedReplicas:

/**
   * Test that blocks on decommissioning hosts are not shown as missing.
   */
@Test
public void testFsckWithDecommissionedReplicas() throws Exception {
    final short replFactor = 1;
    short numDn = 2;
    final long blockSize = 512;
    final long fileSize = 1024;
    boolean checkDecommissionInProgress = false;
    String[] racks = { "/rack1", "/rack2" };
    String[] hosts = { "host1", "host2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // create the test file
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    util.createFile(dfs, path, fileSize, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    // make sure the datanode that holds the replica is healthy before decommission
    String outStr = runFsck(conf, 0, true, testFile);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // decommission datanode
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    ExtendedBlock eb = util.getFirstBlock(dfs, path);
    BlockCollection bc = null;
    // acquire the write lock before the try block, so the finally never unlocks an unheld lock
    fsn.writeLock();
    try {
        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
        bc = fsn.getBlockCollection(bi);
    } finally {
        fsn.writeUnlock();
    }
    DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
    bm.getDatanodeManager().getDecomManager().startDecommission(dn);
    String dnName = dn.getXferAddr();
    // wait for decommission to start
    DatanodeInfo datanodeInfo = null;
    do {
        Thread.sleep(2000);
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr())) {
                datanodeInfo = info;
            }
        }
        // while decommission is in progress, the replica should be reported
        // healthy (0) instead of corrupt (1); assert this only once
        if (!checkDecommissionInProgress && datanodeInfo != null && datanodeInfo.isDecommissionInProgress()) {
            runFsck(conf, 0, true, testFile);
            checkDecommissionInProgress = true;
        }
    } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
    // after decommission is done, the replica status should be healthy (0)
    runFsck(conf, 0, true, testFile);
}
Also used : Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockCollection (org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Test (org.junit.Test)
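
Both tests wait for decommission with a hand-rolled Thread.sleep loop that has no upper bound, so a stalled decommission hangs the test. A sketch of the terminal wait using GenericTestUtils.waitFor is shown below, assuming org.apache.hadoop.test.GenericTestUtils is on the test classpath and a Java 8 toolchain (its Supplier parameter type varies across Hadoop versions, but a lambda satisfies either). This replaces only the final "is decommissioned" condition; the mid-loop in-progress assertion would still need its own poll:

// dnName and dfs are the variables from the test bodies above;
// waitFor throws TimeoutException, which the tests' "throws Exception" covers
GenericTestUtils.waitFor(() -> {
    try {
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr())) {
                return info.isDecommissioned();
            }
        }
    } catch (IOException e) {
        // treat a transient RPC failure as "not decommissioned yet"
    }
    return false;
}, 2000, 90000);  // poll every 2 seconds, give up after 90 seconds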

Aggregations

Path (org.apache.hadoop.fs.Path): 2 examples
DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil): 2 examples
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 examples
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 examples
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 examples
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 examples
BlockCollection (org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection): 2 examples
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2 examples
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 2 examples
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 2 examples
Test (org.junit.Test): 2 examples
Matchers.anyString (org.mockito.Matchers.anyString): 2 examples
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 1 example