Example 41 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class TestFSImage, method testSupportBlockGroup.

/**
   * Ensure that FSImage supports BlockGroup.
   */
@Test(timeout = 60000)
public void testSupportBlockGroup() throws Exception {
    final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() + testECPolicy.getNumParityUnits());
    final int BLOCK_SIZE = 8 * 1024 * 1024;
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    DFSTestUtil.enableAllECPolicies(conf);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        Path parentDir = new Path("/ec-10-4");
        Path childDir = new Path(parentDir, "ec-3-2");
        ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
        // Create directories and files
        fs.mkdirs(parentDir);
        fs.mkdirs(childDir);
        fs.setErasureCodingPolicy(parentDir, testECPolicy.getName());
        fs.setErasureCodingPolicy(childDir, ec32Policy.getName());
        Path file_10_4 = new Path(parentDir, "striped_file_10_4");
        Path file_3_2 = new Path(childDir, "striped_file_3_2");
        // Write content to files
        byte[] bytes = StripedFileTestUtil.generateBytes(BLOCK_SIZE);
        DFSTestUtil.writeFile(fs, file_10_4, new String(bytes));
        DFSTestUtil.writeFile(fs, file_3_2, new String(bytes));
        // Save namespace and restart NameNode
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        fs.saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        cluster.restartNameNodes();
        fs = cluster.getFileSystem();
        assertTrue(fs.exists(file_10_4));
        assertTrue(fs.exists(file_3_2));
        // check the information of file_10_4
        FSNamesystem fsn = cluster.getNamesystem();
        INodeFile inode = fsn.dir.getINode(file_10_4.toString()).asFile();
        assertTrue(inode.isStriped());
        assertEquals(testECPolicy.getId(), inode.getErasureCodingPolicyID());
        BlockInfo[] blks = inode.getBlocks();
        assertEquals(1, blks.length);
        assertTrue(blks[0].isStriped());
        assertEquals(testECPolicy.getId(), fs.getErasureCodingPolicy(file_10_4).getId());
        assertEquals(testECPolicy.getId(), ((BlockInfoStriped) blks[0]).getErasureCodingPolicy().getId());
        assertEquals(testECPolicy.getNumDataUnits(), ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(testECPolicy.getNumParityUnits(), ((BlockInfoStriped) blks[0]).getParityBlockNum());
        byte[] content = DFSTestUtil.readFileAsBytes(fs, file_10_4);
        assertArrayEquals(bytes, content);
        // check the information of file_3_2
        inode = fsn.dir.getINode(file_3_2.toString()).asFile();
        assertTrue(inode.isStriped());
        assertEquals(ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID).getId(), inode.getErasureCodingPolicyID());
        blks = inode.getBlocks();
        assertEquals(1, blks.length);
        assertTrue(blks[0].isStriped());
        assertEquals(ec32Policy.getId(), fs.getErasureCodingPolicy(file_3_2).getId());
        assertEquals(ec32Policy.getNumDataUnits(), ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(ec32Policy.getNumParityUnits(), ((BlockInfoStriped) blks[0]).getParityBlockNum());
        content = DFSTestUtil.readFileAsBytes(fs, file_3_2);
        assertArrayEquals(bytes, content);
        // check the EC policy on parent Dir
        ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicy(parentDir.toString());
        assertNotNull(ecPolicy);
        assertEquals(testECPolicy.getId(), ecPolicy.getId());
        // check the EC policy on child Dir
        ecPolicy = fsn.getErasureCodingPolicy(childDir.toString());
        assertNotNull(ecPolicy);
        assertEquals(ec32Policy.getId(), ecPolicy.getId());
        // check the EC policy on root directory
        ecPolicy = fsn.getErasureCodingPolicy("/");
        assertNull(ecPolicy);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Test (org.junit.Test)
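The downcasts in the assertions above follow a pattern worth naming: BlockInfo is the abstract supertype, and isStriped() tells you whether a block may be cast to BlockInfoStriped. A minimal sketch of that pattern, assuming inode is an INodeFile obtained as in the test (the println is illustrative only):

for (BlockInfo blk : inode.getBlocks()) {
    if (blk.isStriped()) {
        // Striped blocks carry their EC schema; contiguous blocks do not.
        BlockInfoStriped striped = (BlockInfoStriped) blk;
        System.out.println("EC policy " + striped.getErasureCodingPolicy().getName()
            + ": " + striped.getDataBlockNum() + " data + "
            + striped.getParityBlockNum() + " parity units");
    }
}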

Example 42 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithClose.

@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
    completedBlockInfo.setBlockCollectionId(file.getId());
    completedBlockInfo.setGenerationStamp(genStamp);
    doReturn(completedBlockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(completedBlockInfo).when(file).getLastBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
}
Also used: BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)

Example 43 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class TestCommitBlockSynchronization, method testCommitBlockSynchronization.

@Test
public void testCommitBlockSynchronization() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, false, newTargets, null);
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, false, newTargets, null);
    // Simulate 'completing' the block.
    BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
    completedBlockInfo.setBlockCollectionId(file.getId());
    completedBlockInfo.setGenerationStamp(genStamp);
    doReturn(completedBlockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(completedBlockInfo).when(file).getLastBlock();
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, false, newTargets, null);
}
Also used: BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
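Both tests stub the spy with doReturn(...).when(...) rather than when(...).thenReturn(...). For a Mockito spy that distinction matters: when(spy.method()) invokes the real method while the stub is being installed, whereas doReturn sets the answer without touching the real implementation. A minimal sketch of the idiom; namesystem here is a hypothetical real instance being wrapped:

import static org.mockito.Mockito.*;

// Wrap a real object, then stub without invoking the real getStoredBlock().
FSNamesystem namesystemSpy = spy(namesystem);
doReturn(completedBlockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
// when(namesystemSpy.getStoredBlock(...)).thenReturn(...) would call the real
// method during stubbing, which may throw or mutate NameNode state.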

Example 44 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class TestINodeFile, method createINodeFiles.

/**
   * Creates the required number of files with one block each.
   * @param nCount Number of INodes to create
   * @param fileNamePrefix Prefix used to build each file's name
   * @return Array of INode files
   */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
    if (nCount <= 0)
        return new INodeFile[1];
    replication = 3;
    preferredBlockSize = 128 * 1024 * 1024;
    INodeFile[] iNodes = new INodeFile[nCount];
    for (int i = 0; i < nCount; i++) {
        iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication, preferredBlockSize);
        iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
        BlockInfo newblock = new BlockInfoContiguous(replication);
        iNodes[i].addBlock(newblock);
    }
    return iNodes;
}
Also used: BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
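A hypothetical caller, to show what the helper hands back: every returned INodeFile already carries exactly one contiguous block, so block-level assertions need no running cluster:

INodeFile[] files = createINodeFiles(4, "file");
for (INodeFile f : files) {
    BlockInfo[] blocks = f.getBlocks();
    // One block was added per file in the loop above.
    assertEquals(1, blocks.length);
    // BlockInfoContiguous is the non-striped block type.
    assertFalse(blocks[0].isStriped());
}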

Example 45 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class TestFsck, method testBlockIdCKDecommission.

/**
   * Test for blockIdCK with datanode decommission.
   */
@Test
public void testBlockIdCKDecommission() throws Exception {
    final short replFactor = 1;
    short numDn = 2;
    final long blockSize = 512;
    boolean checkDecommissionInProgress = false;
    String[] racks = { "/rack1", "/rack2" };
    String[] hosts = { "host1", "host2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    //create files
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");
    //make sure datanode that has replica is fine before decommission
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    //decommission datanode
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    ExtendedBlock eb = util.getFirstBlock(dfs, path);
    BlockCollection bc = null;
    // Acquire the lock before the try block so the finally clause never
    // unlocks a lock that was not successfully taken.
    fsn.writeLock();
    try {
        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
        bc = fsn.getBlockCollection(bi);
    } finally {
        fsn.writeUnlock();
    }
    DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
    bm.getDatanodeManager().getDecomManager().startDecommission(dn);
    String dnName = dn.getXferAddr();
    //wait for decommission start
    DatanodeInfo datanodeInfo = null;
    int count = 0;
    do {
        Thread.sleep(2000);
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr())) {
                datanodeInfo = info;
            }
        }
        //check decommissioning only once
        if (!checkDecommissionInProgress && datanodeInfo != null && datanodeInfo.isDecommissionInProgress()) {
            String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
            assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
            checkDecommissionInProgress = true;
        }
    } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
    //check decommissioned
    String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
    assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
}
Also used: Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockCollection (org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Test (org.junit.Test)
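The hand-rolled sleep loop in the middle of the test could also be written with Hadoop's test helper GenericTestUtils.waitFor (org.apache.hadoop.test.GenericTestUtils). A sketch of just the final decommissioned-state wait, reusing the test's dfs and dnName; the 90-second cap is an arbitrary choice for this sketch, and waitFor's TimeoutException propagates through the test's throws clause:

GenericTestUtils.waitFor(() -> {
    try {
        // Poll the datanode report until our node shows up as decommissioned.
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr())) {
                return info.isDecommissioned();
            }
        }
    } catch (IOException e) {
        // Transient RPC failure: keep polling.
        return false;
    }
    return false;
}, 2000, 90000);  // check every 2 seconds, give up after 90 seconds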

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84
Test (org.junit.Test): 28
Path (org.apache.hadoop.fs.Path): 27
Block (org.apache.hadoop.hdfs.protocol.Block): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13
IOException (java.io.IOException): 11
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5