
Example 66 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestAddStripedBlocks, method testAddStripedBlock.

@Test(timeout = 60000)
public void testAddStripedBlock() throws Exception {
    final Path file = new Path("/file1");
    // create a file and flush some striped data to it
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        writeAndFlushStripedOutputStream((DFSStripedOutputStream) out.getWrappedStream(), DFS_BYTES_PER_CHECKSUM_DEFAULT);
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        BlockInfo[] blocks = fileNode.getBlocks();
        assertEquals(1, blocks.length);
        Assert.assertTrue(blocks[0].isStriped());
        checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), true);
        // restart NameNode to check editlog
        cluster.restartNameNode(true);
        fsdir = cluster.getNamesystem().getFSDirectory();
        fileNode = fsdir.getINode4Write(file.toString()).asFile();
        blocks = fileNode.getBlocks();
        assertEquals(1, blocks.length);
        Assert.assertTrue(blocks[0].isStriped());
        checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
        // save namespace, restart namenode, and check
        dfs = cluster.getFileSystem();
        dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
        cluster.restartNameNode(true);
        fsdir = cluster.getNamesystem().getFSDirectory();
        fileNode = fsdir.getINode4Write(file.toString()).asFile();
        blocks = fileNode.getBlocks();
        assertEquals(1, blocks.length);
        Assert.assertTrue(blocks[0].isStriped());
        checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
    } finally {
        IOUtils.cleanup(null, out);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
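The helpers writeAndFlushStripedOutputStream and checkStripedBlockUC are defined elsewhere in TestAddStripedBlocks and are not shown here. Note what the three assertion rounds distinguish: the first checks the NameNode's in-memory state right after the write, the second (after a restart) checks state rebuilt by edit-log replay, and the third (after saveNamespace plus a restart) checks state loaded from the fsimage. A minimal sketch of the checkpoint-and-restart step, reusing only calls that already appear in the test:

// Sketch: force a new fsimage (saveNamespace requires safe mode), then
// restart the NameNode so that state is loaded from the image rather than
// replayed from the edit log.
static void saveNamespaceAndRestart(MiniDFSCluster cluster,
        DistributedFileSystem dfs) throws Exception {
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNode(true);
}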

Example 67 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestAddStripedBlocks, method testCheckStripedReplicaCorrupt.

@Test
public void testCheckStripedReplicaCorrupt() throws Exception {
    final int numBlocks = 4;
    final int numStripes = 4;
    final Path filePath = new Path("/corrupt");
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    final BlockManager bm = ns.getBlockManager();
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, numStripes, false);
    INodeFile fileNode = ns.getFSDirectory().getINode(filePath.toString()).asFile();
    Assert.assertTrue(fileNode.isStriped());
    BlockInfo stored = fileNode.getBlocks()[0];
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a block report with correct size
    DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
    final Block reported = new Block(stored);
    reported.setNumBytes(numStripes * cellSize);
    StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a report for another data block with a wrong size
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize - 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(1).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with correct size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with wrong size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize + 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    // the total number of corrupt block groups is still 1
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // 2 internal blocks corrupted
    Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());
    // Now change the size of the stored block, and test verification of
    // the last block's size
    stored.setNumBytes(stored.getNumBytes() + 10);
    reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(4).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    // Now send a parity block report with correct size based on the adjusted
    // size of the stored block; after the next setNumBytes call the stored
    // block holds numStripes full stripes + one cell + 10 bytes
    stored.setNumBytes(stored.getNumBytes() + cellSize);
    reported.setBlockId(stored.getBlockId());
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize + 10);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
}
Also used: Path (org.apache.hadoop.fs.Path), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), Test (org.junit.Test)
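The ID arithmetic above (stored.getBlockId() + 1, stored.getBlockId() + dataBlocks) relies on the striped block-group layout: the internal blocks of a group get consecutive IDs starting at the group's ID, with indices 0 through dataBlocks - 1 addressing data blocks and the remaining indices addressing parity blocks. A standalone sketch of that convention (the 6+3 split mirrors an RS-6-3 layout and is an assumption of this sketch, not taken from the test):

// Standalone sketch of striped block-group ID arithmetic; not HDFS code.
public class StripedBlockIdSketch {

    static final int DATA_BLOCKS = 6;   // assumed RS-6-3 layout
    static final int PARITY_BLOCKS = 3;

    // ID of the i-th internal block in the group whose ID is groupId.
    static long internalBlockId(long groupId, int i) {
        return groupId + i;
    }

    // Indices past the data range address parity blocks.
    static boolean isParity(int i) {
        return i >= DATA_BLOCKS;
    }

    public static void main(String[] args) {
        long groupId = 0xB00L;                                     // hypothetical group ID
        System.out.println(internalBlockId(groupId, 1));           // a data block, as in the "+ 1" reports
        System.out.println(internalBlockId(groupId, DATA_BLOCKS)); // first parity block, as in "+ dataBlocks"
    }
}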

Example 68 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestFSEditLogLoader, method testUpdateStripedBlocks.

@Test
public void testUpdateStripedBlocks() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, testECPolicy.getName());
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/ec";
        String testFile = "testfile_002";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser2";
        String clientMachine = "testMachine2";
        long blkId = 1;
        long blkNumBytes = 1024;
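        // note: Block(long, long, long) takes (id, numBytes, generationStamp),
        // so the "timestamp" below is really used as a generation stamp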
        long timestamp = 1426222918;
        short blockNum = (short) testECPolicy.getNumDataUnits();
        short parityNum = (short) testECPolicy.getNumParityUnits();
        // create the directory and set its erasure coding policy
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        fs.getClient().getNamenode().setErasureCodingPolicy(testDir, testECPolicy.getName());
        // create a file with striped blocks
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoStriped stripedBlk = new BlockInfoStriped(new Block(blkId, blkNumBytes, timestamp), testECPolicy);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(stripedBlk);
        fns.getEditLog().logAddBlock(testFilePath, file);
        TestINodeFile.toCompleteFile(file);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        fns.leaveSafeMode(false);
        // update the last block's length and generation stamp
        long newBlkNumBytes = 1024 * 8;
        long newTimestamp = 1426222918 + 3600;
        file.toUnderConstruction(clientName, clientMachine);
        file.getLastBlock().setNumBytes(newBlkNumBytes);
        file.getLastBlock().setGenerationStamp(newTimestamp);
        fns.getEditLog().logUpdateBlocks(testFilePath, file, true);
        TestINodeFile.toCompleteFile(file);
        // After the NameNode restarts, if the loaded block matches the values
        // above (new block size and generation stamp), the edit log was
        // successfully applied on top of the fsimage.
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        assertTrue(inodeLoaded.isStriped());
        BlockInfo[] blks = inodeLoaded.getBlocks();
        assertEquals(1, blks.length);
        assertTrue(blks[0].isStriped());
        assertEquals(blkId, blks[0].getBlockId());
        assertEquals(newBlkNumBytes, blks[0].getNumBytes());
        assertEquals(newTimestamp, blks[0].getGenerationStamp());
        assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
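The test brackets each block-list mutation between toUnderConstruction and TestINodeFile.toCompleteFile. A small sketch of that pattern factored into a helper, under the same visibility assumptions the test already makes (both transitions are reachable from the caller):

// Sketch: apply a mutation to a file's block list while it is under
// construction, then complete the file again so later assertions see a
// finalized inode.
static void mutateUnderConstruction(INodeFile file, String clientName,
        String clientMachine, Runnable mutation) {
    file.toUnderConstruction(clientName, clientMachine);
    try {
        mutation.run();
    } finally {
        TestINodeFile.toCompleteFile(file);
    }
}

Usage mirroring the first bracket above: mutateUnderConstruction(file, clientName, clientMachine, () -> file.addBlock(stripedBlk));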

Example 69 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestFSImage, method testSupportBlockGroup.

/**
   * Ensure that FSImage supports BlockGroup.
   */
@Test(timeout = 60000)
public void testSupportBlockGroup() throws Exception {
    final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() + testECPolicy.getNumParityUnits());
    final int BLOCK_SIZE = 8 * 1024 * 1024;
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    DFSTestUtil.enableAllECPolicies(conf);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        Path parentDir = new Path("/ec-10-4");
        Path childDir = new Path(parentDir, "ec-3-2");
        ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
        // Create directories and files
        fs.mkdirs(parentDir);
        fs.mkdirs(childDir);
        fs.setErasureCodingPolicy(parentDir, testECPolicy.getName());
        fs.setErasureCodingPolicy(childDir, ec32Policy.getName());
        Path file_10_4 = new Path(parentDir, "striped_file_10_4");
        Path file_3_2 = new Path(childDir, "striped_file_3_2");
        // Write content to files
        byte[] bytes = StripedFileTestUtil.generateBytes(BLOCK_SIZE);
        DFSTestUtil.writeFile(fs, file_10_4, new String(bytes));
        DFSTestUtil.writeFile(fs, file_3_2, new String(bytes));
        // Save namespace and restart NameNode
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        fs.saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        cluster.restartNameNodes();
        fs = cluster.getFileSystem();
        assertTrue(fs.exists(file_10_4));
        assertTrue(fs.exists(file_3_2));
        // check the information of file_10_4
        FSNamesystem fsn = cluster.getNamesystem();
        INodeFile inode = fsn.dir.getINode(file_10_4.toString()).asFile();
        assertTrue(inode.isStriped());
        assertEquals(testECPolicy.getId(), inode.getErasureCodingPolicyID());
        BlockInfo[] blks = inode.getBlocks();
        assertEquals(1, blks.length);
        assertTrue(blks[0].isStriped());
        assertEquals(testECPolicy.getId(), fs.getErasureCodingPolicy(file_10_4).getId());
        assertEquals(testECPolicy.getId(), ((BlockInfoStriped) blks[0]).getErasureCodingPolicy().getId());
        assertEquals(testECPolicy.getNumDataUnits(), ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(testECPolicy.getNumParityUnits(), ((BlockInfoStriped) blks[0]).getParityBlockNum());
        byte[] content = DFSTestUtil.readFileAsBytes(fs, file_10_4);
        assertArrayEquals(bytes, content);
        // check the information of file_3_2
        inode = fsn.dir.getINode(file_3_2.toString()).asFile();
        assertTrue(inode.isStriped());
        assertEquals(ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID).getId(), inode.getErasureCodingPolicyID());
        blks = inode.getBlocks();
        assertEquals(1, blks.length);
        assertTrue(blks[0].isStriped());
        assertEquals(ec32Policy.getId(), fs.getErasureCodingPolicy(file_3_2).getId());
        assertEquals(ec32Policy.getNumDataUnits(), ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(ec32Policy.getNumParityUnits(), ((BlockInfoStriped) blks[0]).getParityBlockNum());
        content = DFSTestUtil.readFileAsBytes(fs, file_3_2);
        assertArrayEquals(bytes, content);
        // check the EC policy on parent Dir
        ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicy(parentDir.toString());
        assertNotNull(ecPolicy);
        assertEquals(testECPolicy.getId(), ecPolicy.getId());
        // check the EC policy on child Dir
        ecPolicy = fsn.getErasureCodingPolicy(childDir.toString());
        assertNotNull(ecPolicy);
        assertEquals(ec32Policy.getId(), ecPolicy.getId());
        // check the EC policy on root directory
        ecPolicy = fsn.getErasureCodingPolicy("/");
        assertNull(ecPolicy);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Test (org.junit.Test)
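The last three checks exercise policy inheritance: a path resolves to the nearest ancestor directory with an explicitly set policy, and the root here has none, which is why the lookup for "/" returns null. A conceptual sketch of that resolution over a plain map (not the FSDirectory implementation; the paths and policy names are placeholders):

import java.util.Map;

// Conceptual sketch of nearest-ancestor EC policy resolution, mirroring
// what the three getErasureCodingPolicy() checks above observe.
class EcPolicyLookupSketch {

    // policies holds only explicitly set entries, e.g.
    // {"/ec-10-4" -> "policyA", "/ec-10-4/ec-3-2" -> "policyB"}
    static String effectivePolicy(Map<String, String> policies, String path) {
        for (String p = path; p != null; p = parent(p)) {
            String policy = policies.get(p);
            if (policy != null) {
                return policy;  // nearest ancestor wins
            }
        }
        return null;  // no ancestor sets a policy: the "/" case above
    }

    private static String parent(String path) {
        if (path.equals("/")) {
            return null;  // the root has no parent
        }
        int i = path.lastIndexOf('/');
        return i == 0 ? "/" : path.substring(0, i);
    }
}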

Example 70 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithClose.

@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    // Repeat the call to make sure a second invocation is handled cleanly
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
    completedBlockInfo.setBlockCollectionId(file.getId());
    completedBlockInfo.setGenerationStamp(genStamp);
    doReturn(completedBlockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(completedBlockInfo).when(file).getLastBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
}
Also used: BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
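mockFileUnderConstruction() and makeNameSystemSpy() are helpers defined elsewhere in TestCommitBlockSynchronization and are not shown on this page. A hypothetical sketch of the first (an assumption about its shape, not the actual helper) to show the Mockito setup the example relies on:

// Hypothetical sketch: a Mockito mock of an INodeFile that reports itself
// as under construction, so commitBlockSynchronization treats it as an
// open file. The real helper may stub more methods.
private INodeFile mockFileUnderConstruction() {
    INodeFile file = mock(INodeFile.class);
    when(file.isUnderConstruction()).thenReturn(true);
    return file;
}

This assumes the static imports org.mockito.Mockito.mock and org.mockito.Mockito.when, which the surrounding test (see doReturn above) already uses.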

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84 usages
Test (org.junit.Test): 28 usages
Path (org.apache.hadoop.fs.Path): 27 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 22 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16 usages
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14 usages
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13 usages
IOException (java.io.IOException): 11 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11 usages
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11 usages
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 usages
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 usages
Configuration (org.apache.hadoop.conf.Configuration): 6 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 usages
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5 usages