Example 56 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From the class TestOfflineImageViewerWithStripedBlocks, method testFileSize.

private void testFileSize(int numBytes) throws IOException, UnresolvedLinkException, SnapshotAccessControlException {
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File orgFsimage = null;
    Path file = new Path("/eczone/striped");
    FSDataOutputStream out = fs.create(file, true);
    byte[] bytes = DFSTestUtil.generateSequentialBytes(0, numBytes);
    out.write(bytes);
    out.close();
    // Write results to the fsimage file
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    fs.saveNamespace();
    // Determine location of fsimage file
    orgFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (orgFsimage == null) {
        throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
    String fileStatus = loader.getFileStatus("/eczone/striped");
    long expectedFileSize = bytes.length;
    // Verify space consumed present in BlockInfoStriped
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(), fileNode.getErasureCodingPolicyID());
    assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
    long actualFileSize = 0;
    for (BlockInfo blockInfo : fileNode.getBlocks()) {
        assertTrue("Didn't find block striped information", blockInfo instanceof BlockInfoStriped);
        actualFileSize += blockInfo.getNumBytes();
    }
    assertEquals("Wrongly computed file size contains striped blocks", expectedFileSize, actualFileSize);
    // Verify space consumed present in filestatus
    String EXPECTED_FILE_SIZE = "\"length\":" + String.valueOf(expectedFileSize);
    assertTrue("Wrongly computed file size contains striped blocks, file status:" + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE, fileStatus.contains(EXPECTED_FILE_SIZE));
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) File(java.io.File) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
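The safe-mode enter / saveNamespace sequence above is what forces the NameNode to write a fresh fsimage that FSImageLoader can then read. A minimal sketch of that checkpoint step, with a helper name of our own; it assumes fs is the cluster's DistributedFileSystem and cluster is a running MiniDFSCluster, plus the imports listed above:

private static File forceNewFsimage(DistributedFileSystem fs, MiniDFSCluster cluster) throws IOException {
    // Enter safe mode so saveNamespace is permitted, write the image, then leave safe mode.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    // Pick up the newest image file from the first NameNode storage directory.
    return FSImageTestUtil.findLatestImageFile(
        FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
}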

Example 57 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From the class CreateEditsLog, method addFiles.

static void addFiles(FSEditLog editLog, int numFiles, short replication, int blocksPerFile, long startingBlockId, long blockSize, FileNameGenerator nameGenerator) {
    PermissionStatus p = new PermissionStatus("joeDoe", "people", new FsPermission((short) 0777));
    INodeId inodeId = new INodeId();
    INodeDirectory dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
    editLog.logMkDir(BASE_PATH, dirInode);
    BlockInfo[] blocks = new BlockInfo[blocksPerFile];
    for (int iB = 0; iB < blocksPerFile; ++iB) {
        blocks[iB] = new BlockInfoContiguous(new Block(0, blockSize, BLOCK_GENERATION_STAMP), replication);
    }
    long currentBlockId = startingBlockId;
    long bidAtSync = startingBlockId;
    for (int iF = 0; iF < numFiles; iF++) {
        for (int iB = 0; iB < blocksPerFile; ++iB) {
            blocks[iB].setBlockId(currentBlockId++);
        }
        final INodeFile inode = new INodeFile(inodeId.nextValue(), null, p, 0L, 0L, blocks, replication, blockSize);
        inode.toUnderConstruction("", "");
        // Append path to filename with information about blockIDs 
        String path = "_" + iF + "_B" + blocks[0].getBlockId() + "_to_B" + blocks[blocksPerFile - 1].getBlockId() + "_";
        String filePath = nameGenerator.getNextFileName("");
        filePath = filePath + path;
        // Log the new sub directory in edits
        if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
            String currentDir = nameGenerator.getCurrentDir();
            dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
            editLog.logMkDir(currentDir, dirInode);
        }
        INodeFile fileUc = new INodeFile(inodeId.nextValue(), null, p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
        fileUc.toUnderConstruction("", "");
        editLog.logOpenFile(filePath, fileUc, false, false);
        editLog.logCloseFile(filePath, inode);
        if (currentBlockId - bidAtSync >= 2000) {
            // sync every 2K blocks
            editLog.logSync();
            bidAtSync = currentBlockId;
        }
    }
    System.out.println("Created edits log in directory " + edits_dir);
    System.out.println(" containing " + numFiles + " File-Creates, each file with " + blocksPerFile + " blocks");
    System.out.println(" blocks range: " + startingBlockId + " to " + (currentBlockId - 1));
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
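A hedged sketch of how addFiles might be driven; the FileNameGenerator constructor arguments and the file sizes below are illustrative assumptions, not taken from this example:

// Illustrative driver: log 100 files of one 64 MB block each under BASE_PATH.
// The FileNameGenerator arguments (base directory, files per directory) are assumed.
FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
addFiles(editLog, 100, (short) 3, 1, 1000L, 64 * 1024 * 1024L, nameGenerator);
// Flush any edits still buffered since the last periodic sync inside addFiles.
editLog.logSync();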

Example 58 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From the class TestBlockUnderConstruction, method verifyFileBlocks.

private void verifyFileBlocks(String file, boolean isFileOpen) throws IOException {
    FSNamesystem ns = cluster.getNamesystem();
    final INodeFile inode = INodeFile.valueOf(ns.dir.getINode(file), file);
    assertTrue("File " + inode.toString() + " isUnderConstruction = " + inode.isUnderConstruction() + " expected to be " + isFileOpen, inode.isUnderConstruction() == isFileOpen);
    BlockInfo[] blocks = inode.getBlocks();
    assertTrue("File does not have blocks: " + inode.toString(), blocks != null && blocks.length > 0);
    int idx = 0;
    BlockInfo curBlock;
    // all blocks but the last two should be regular blocks
    for (; idx < blocks.length - 2; idx++) {
        curBlock = blocks[idx];
        assertTrue("Block is not complete: " + curBlock, curBlock.isComplete());
        assertTrue("Block is not in BlocksMap: " + curBlock, ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
    }
    // the penultimate block is either complete or committed if the file is not closed
    if (idx > 0) {
        // penultimate block
        curBlock = blocks[idx - 1];
        assertTrue("Block " + curBlock + " isUnderConstruction = " + inode.isUnderConstruction() + " expected to be " + isFileOpen, (isFileOpen && curBlock.isComplete()) || (!isFileOpen && !curBlock.isComplete() == (curBlock.getBlockUCState() == BlockUCState.COMMITTED)));
        assertTrue("Block is not in BlocksMap: " + curBlock, ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
    }
    // The last block is complete if the file is closed.
    // If the file is open, the last block may be complete or not. 
    // last block
    curBlock = blocks[idx];
    if (!isFileOpen) {
        assertTrue("Block " + curBlock + ", isFileOpen = " + isFileOpen, curBlock.isComplete());
    }
    assertTrue("Block is not in BlocksMap: " + curBlock, ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
}
Also used : BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
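The penultimate-block assertion above packs the whole rule into one boolean expression. A hedged restatement as a standalone helper (the method name is ours, not part of the test):

// Restates the condition asserted above: an open file must have a complete
// penultimate block; for a closed file the block must be incomplete exactly
// when its under-construction state is COMMITTED.
private static boolean penultimateBlockStateOk(BlockInfo block, boolean isFileOpen) {
    if (isFileOpen) {
        return block.isComplete();
    }
    return !block.isComplete() == (block.getBlockUCState() == BlockUCState.COMMITTED);
}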

Example 59 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From the class TestAddStripedBlocks, method testAddStripedBlock.

@Test(timeout = 60000)
public void testAddStripedBlock() throws Exception {
    final Path file = new Path("/file1");
    // create an empty file
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        writeAndFlushStripedOutputStream((DFSStripedOutputStream) out.getWrappedStream(), DFS_BYTES_PER_CHECKSUM_DEFAULT);
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        BlockInfo[] blocks = fileNode.getBlocks();
        assertEquals(1, blocks.length);
        Assert.assertTrue(blocks[0].isStriped());
        checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), true);
        // restart NameNode to check editlog
        cluster.restartNameNode(true);
        fsdir = cluster.getNamesystem().getFSDirectory();
        fileNode = fsdir.getINode4Write(file.toString()).asFile();
        blocks = fileNode.getBlocks();
        assertEquals(1, blocks.length);
        Assert.assertTrue(blocks[0].isStriped());
        checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
        // save namespace, restart namenode, and check
        dfs = cluster.getFileSystem();
        dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
        cluster.restartNameNode(true);
        fsdir = cluster.getNamesystem().getFSDirectory();
        fileNode = fsdir.getINode4Write(file.toString()).asFile();
        blocks = fileNode.getBlocks();
        assertEquals(1, blocks.length);
        Assert.assertTrue(blocks[0].isStriped());
        checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
    } finally {
        IOUtils.cleanup(null, out);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 60 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From the class TestAddStripedBlocks, method testCheckStripedReplicaCorrupt.

@Test
public void testCheckStripedReplicaCorrupt() throws Exception {
    final int numBlocks = 4;
    final int numStripes = 4;
    final Path filePath = new Path("/corrupt");
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    final BlockManager bm = ns.getBlockManager();
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, numStripes, false);
    INodeFile fileNode = ns.getFSDirectory().getINode(filePath.toString()).asFile();
    Assert.assertTrue(fileNode.isStriped());
    BlockInfo stored = fileNode.getBlocks()[0];
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a block report with correct size
    DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
    final Block reported = new Block(stored);
    reported.setNumBytes(numStripes * cellSize);
    StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a block report with wrong size
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize - 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(1).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with correct size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with wrong size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize + 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    // the total number of corrupted block info is still 1
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // 2 internal blocks corrupted
    Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());
    // Now change the size of stored block, and test verifying the last
    // block size
    stored.setNumBytes(stored.getNumBytes() + 10);
    reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(4).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    // Now send a parity block report with correct size based on adjusted
    // size of stored block
    /** Now stored block has {@link numStripes} full stripes + a cell + 10 */
    stored.setNumBytes(stored.getNumBytes() + cellSize);
    reported.setBlockId(stored.getBlockId());
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize + 10);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
}
Also used : Path(org.apache.hadoop.fs.Path) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) Test(org.junit.Test)
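Each reported ID above is stored.getBlockId() plus an internal-block index: offsets below dataBlocks address data blocks of the striped group, larger offsets address parity blocks. A hedged sketch factoring out the repeated reporting step (the helper name and parameter list are ours; it assumes DatanodeID from org.apache.hadoop.hdfs.protocol in addition to the imports listed above):

// Report one internal block of the striped group to the NameNode, identified by
// its offset from the group's base block ID and carrying the given length.
private static void reportInternalBlock(FSNamesystem ns, DatanodeStorage storage,
        BlockInfo stored, int internalIndex, long reportedBytes,
        DatanodeID datanodeId) throws IOException {
    Block reported = new Block(stored);
    reported.setBlockId(stored.getBlockId() + internalIndex);
    reported.setNumBytes(reportedBytes);
    StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(
        reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(datanodeId, reports[0]);
}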

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84
Test (org.junit.Test): 28
Path (org.apache.hadoop.fs.Path): 27
Block (org.apache.hadoop.hdfs.protocol.Block): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13
IOException (java.io.IOException): 11
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5