
Example 21 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSEditLog method logAddBlock.

public void logAddBlock(String path, INodeFile file) {
    Preconditions.checkArgument(file.isUnderConstruction());
    BlockInfo[] blocks = file.getBlocks();
    Preconditions.checkState(blocks != null && blocks.length > 0);
    BlockInfo pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
    BlockInfo lastBlock = blocks[blocks.length - 1];
    AddBlockOp op = AddBlockOp.getInstance(cache.get())
        .setPath(path)
        .setPenultimateBlock(pBlock)
        .setLastBlock(lastBlock);
    logEdit(op);
}
Also used : AddBlockOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
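For reference, the penultimate/last selection that logAddBlock relies on can be isolated into a small sketch. The helper below is hypothetical (it is not part of FSEditLog) and only illustrates the indexing logic used above:

static BlockInfo[] selectLoggedBlocks(BlockInfo[] blocks) {
    // With a single block there is no penultimate block, so logAddBlock
    // records null for it; otherwise the second-to-last block is logged.
    BlockInfo pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
    BlockInfo lastBlock = blocks[blocks.length - 1];
    return new BlockInfo[] { pBlock, lastBlock };
}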

Example 22 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class DFSTestUtil method addBlockToFile.

/**
   * Adds a block or a striped block group to a file.
   * This method only manipulates the NameNode's
   * state of the file and the block, without injecting any data into a DataNode.
   * It does, however, mimic the DataNodes' incremental block reports.
   * Periodic heartbeats should be disabled before using this method.
   * @param isStripedBlock whether the block to add is a striped block
   * @param dataNodes list of DataNodes to host the striped block group
   * @param fs the DistributedFileSystem of the cluster
   * @param ns the FSNamesystem whose state is manipulated
   * @param file the path of the file the block is added to
   * @param fileNode the INodeFile of that file
   * @param clientName the name of the client adding the block
   * @param previous the previous block in the file
   * @param numStripes the number of stripes in each block group
   * @param len the block size when a non-striped block is added
   * @return the added block or block group
   */
public static Block addBlockToFile(boolean isStripedBlock,
        List<DataNode> dataNodes, DistributedFileSystem fs, FSNamesystem ns,
        String file, INodeFile fileNode, String clientName,
        ExtendedBlock previous, int numStripes, int len) throws Exception {
    fs.getClient().namenode.addBlock(file, clientName, previous, null, fileNode.getId(), null, null);
    final BlockInfo lastBlock = fileNode.getLastBlock();
    final int groupSize = fileNode.getPreferredBlockReplication();
    assert dataNodes.size() >= groupSize;
    // 1. RECEIVING_BLOCK IBR
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, 0, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(new Path(file));
    // 2. RECEIVED_BLOCK IBR
    long blockSize = isStripedBlock ? numStripes * ecPolicy.getCellSize() : len;
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, blockSize, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    long bytes = isStripedBlock ? numStripes * ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() : len;
    lastBlock.setNumBytes(bytes);
    return lastBlock;
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
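A hedged usage sketch follows: invoking addBlockToFile from a test to append one striped block group. The path /ec/file1, the client name, and the stripe count are hypothetical; cluster and fs are assumed to be the test's MiniDFSCluster and DistributedFileSystem, set up elsewhere:

// Hypothetical call site; assumes heartbeats were already disabled via
// DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true) for each DataNode.
FSNamesystem ns = cluster.getNamesystem();
INodeFile fileNode = ns.getFSDirectory().getINode4Write("/ec/file1").asFile();
Block reported = DFSTestUtil.addBlockToFile(
    true,                     // add a striped block group
    cluster.getDataNodes(),   // DataNodes to host the group
    fs, ns, "/ec/file1", fileNode,
    "testClient",             // clientName
    null,                     // previous: no earlier block in the file
    4,                        // numStripes
    0);                       // len is ignored for striped blocks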

Example 23 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class TestReadStripedFileWithDecoding method testReportBadBlock.

/**
   * After reading a corrupted block, make sure the client can correctly report
   * the corruption to the NameNode.
   */
@Test
public void testReportBadBlock() throws IOException {
    // create file
    final Path file = new Path("/corrupted");
    // length of "corruption"
    final int length = 10;
    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
    DFSTestUtil.writeFile(fs, file, bytes);
    // corrupt the first data block
    int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
    Assert.assertNotEquals(-1, dnIndex);
    LocatedStripedBlock slb = (LocatedStripedBlock) fs.getClient().getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
    final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(slb, cellSize, dataBlocks, parityBlocks);
    // find the first block file
    File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
    File blkFile = MiniDFSCluster.getBlockFile(storageDir, blks[0].getBlock());
    Assert.assertTrue("Block file does not exist", blkFile.exists());
    // corrupt the block file
    LOG.info("Deliberately corrupting file " + blkFile.getName());
    try (FileOutputStream out = new FileOutputStream(blkFile)) {
        out.write("corruption".getBytes());
    }
    // disable heartbeats from the DataNodes so that the corrupted block record
    // is kept in the NameNode
    for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    try {
        // do stateful read
        StripedFileTestUtil.verifyStatefulRead(fs, file, length, bytes, ByteBuffer.allocate(1024));
        // check whether the corruption has been reported to the NameNode
        final FSNamesystem ns = cluster.getNamesystem();
        final BlockManager bm = ns.getBlockManager();
        BlockInfo blockInfo = (ns.getFSDirectory().getINode4Write(file.toString()).asFile().getBlocks())[0];
        Assert.assertEquals(1, bm.getCorruptReplicas(blockInfo).size());
    } finally {
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileOutputStream(java.io.FileOutputStream) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) File(java.io.File) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
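The helper findFirstDataNode used above is not shown in this example; a plausible sketch follows, assuming it maps the first storage location of the file's first block back to an index into cluster.getDataNodes() by matching the transfer port (BlockLocation is org.apache.hadoop.fs.BlockLocation):

private int findFirstDataNode(Path file, long length) throws IOException {
    // Locate the host:port of the first replica of the first block...
    BlockLocation[] locs = fs.getFileBlockLocations(file, 0, length);
    String name = (locs[0].getNames())[0];
    // ...and find the DataNode in the cluster whose transfer port matches.
    int dnIndex = 0;
    for (DataNode dn : cluster.getDataNodes()) {
        int port = dn.getXferPort();
        if (name.contains(Integer.toString(port))) {
            return dnIndex;
        }
        dnIndex++;
    }
    return -1;
}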

Example 24 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class TestCommitBlockSynchronization method makeNameSystemSpy.

private FSNamesystem makeNameSystemSpy(Block block, INodeFile file) throws IOException {
    Configuration conf = new Configuration();
    FSEditLog editlog = mock(FSEditLog.class);
    FSImage image = new FSImage(conf);
    Whitebox.setInternalState(image, "editLog", editlog);
    final DatanodeStorageInfo[] targets = {};
    FSNamesystem namesystem = new FSNamesystem(conf, image);
    namesystem.setImageLoaded(true);
    // FSNamesystem's isFileDeleted() method will return false on this file
    if (file.getParent() == null) {
        INodeDirectory mparent = mock(INodeDirectory.class);
        INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0], mparent.getPermissionStatus(), mparent.getAccessTime());
        parent.setLocalName(new byte[0]);
        parent.addChild(file);
        file.setParent(parent);
    }
    namesystem.dir.getINodeMap().put(file);
    FSNamesystem namesystemSpy = spy(namesystem);
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 1);
    blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    blockInfo.setBlockCollectionId(file.getId());
    blockInfo.setGenerationStamp(genStamp);
    blockInfo.getUnderConstructionFeature().initializeBlockRecovery(blockInfo, genStamp);
    doReturn(blockInfo).when(file).removeLastBlock(any(Block.class));
    doReturn(true).when(file).isUnderConstruction();
    doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();
    doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(blockInfo).when(file).getLastBlock();
    doNothing().when(namesystemSpy).closeFileCommitBlocks(any(String.class), any(INodeFile.class), any(BlockInfo.class));
    doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
    return namesystemSpy;
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Configuration(org.apache.hadoop.conf.Configuration) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
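A hedged sketch of how a test might drive this spy. Here blockId, length, and genStamp are assumed to be constants of the test class, DatanodeID is org.apache.hadoop.hdfs.protocol.DatanodeID, and the repeated call simply illustrates that commitBlockSynchronization is expected to be idempotent:

@Test
public void testCommitBlockSynchronization() throws IOException {
    INodeFile file = mock(INodeFile.class);
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
        false, false, newTargets, null);
    // A repeated commit of the same block should be a harmless no-op.
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
        false, false, newTargets, null);
}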

Example 25 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class TestSnapshotDeletion method testDeleteCurrentFileDirectory.

/**
   * Test deleting a directory which is a descendant of a snapshottable
   * directory. In the test we need to cover the following cases:
   * 
   * <pre>
   * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
   * 2. Delete current INodeFile/INodeDirectory while snapshots have been taken 
   *    on ancestor(s).
   * 3. Delete current INodeFileWithSnapshot.
   * 4. Delete current INodeDirectoryWithSnapshot.
   * </pre>
   */
@Test(timeout = 300000)
public void testDeleteCurrentFileDirectory() throws Exception {
    // create a folder which will be deleted before taking snapshots
    Path deleteDir = new Path(subsub, "deleteDir");
    Path deleteFile = new Path(deleteDir, "deleteFile");
    // create a directory that we will not change during the whole process.
    Path noChangeDirParent = new Path(sub, "noChangeDirParent");
    Path noChangeDir = new Path(noChangeDirParent, "noChangeDir");
    // create a file that we will not change in the future
    Path noChangeFile = new Path(noChangeDir, "noChangeFile");
    DFSTestUtil.createFile(hdfs, deleteFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    // we will change this file's metadata in the future
    Path metaChangeFile1 = new Path(subsub, "metaChangeFile1");
    DFSTestUtil.createFile(hdfs, metaChangeFile1, BLOCKSIZE, REPLICATION, seed);
    // another file, created under noChangeDir, whose metadata will be changed
    Path metaChangeFile2 = new Path(noChangeDir, "metaChangeFile2");
    DFSTestUtil.createFile(hdfs, metaChangeFile2, BLOCKSIZE, REPLICATION, seed);
    // Case 1: delete deleteDir before taking snapshots
    hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    checkQuotaUsageComputation(dir, 10, BLOCKSIZE * REPLICATION * 4);
    hdfs.delete(deleteDir, true);
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    // create snapshot s0
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    // after creating snapshot s0, create a directory tempdir under dir and then
    // delete tempdir immediately
    Path tempDir = new Path(dir, "tempdir");
    Path tempFile = new Path(tempDir, "tempfile");
    DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = temp.getBlocks();
    hdfs.delete(tempDir, true);
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    // check blocks of tempFile
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // make a change: create a new file under subsub
    Path newFileAfterS0 = new Path(subsub, "newFile");
    DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
    // further change: change the replication factor of metaChangeFile1 and metaChangeFile2
    hdfs.setReplication(metaChangeFile1, REPLICATION_1);
    hdfs.setReplication(metaChangeFile2, REPLICATION_1);
    // create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // get two snapshots for later use
    Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
    Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
    // Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
    // metaChangeFile2. Note that when we directly delete a directory, the 
    // directory will be converted to an INodeDirectoryWithSnapshot. To make
    // sure the deletion goes through an INodeDirectory, we delete the parent
    // of noChangeDir
    hdfs.delete(noChangeDirParent, true);
    // during the deletion we add a diff for metaChangeFile2 as its snapshot copy
    // for s1; we also add diffs for both sub and noChangeDirParent
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // check the snapshot copy of noChangeDir 
    Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName() + "/" + noChangeDirParent.getName() + "/" + noChangeDir.getName());
    INodeDirectory snapshotNode = (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
    // should still be an INodeDirectory
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
    // check 2 children: noChangeFile and metaChangeFile2
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    INodeFile metaChangeFile2SCopy = children.get(0).asFile();
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertTrue(metaChangeFile2SCopy.isWithSnapshot());
    assertFalse(metaChangeFile2SCopy.isUnderConstruction());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    // check the replication factor of metaChangeFile2SCopy
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
    // Case 4: delete directory sub
    // before deleting sub, we first create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 10L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    // during the deletion we add diffs for subsub and metaChangeFile1, and
    // remove newFile
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // make sure the whole subtree of sub is stored correctly in snapshot
    Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName());
    INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString()).asDirectory();
    assertTrue(snapshotNode4Sub.isWithSnapshot());
    // the snapshot copy of sub has only one child subsub.
    // newFile should have been destroyed
    assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
    // but should have two children, subsub and noChangeDirParent, when s1 was taken
    assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
    // check the snapshot copy of subsub, which is contained in the subtree of
    // sub's snapshot copy
    INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
    assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
    assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
    // check the children of subsub
    INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
    children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(2, children.size());
    assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
    assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
    // only one child before snapshot s0 
    children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
    assertEquals(1, children.size());
    INode child = children.get(0);
    assertEquals(child.getLocalName(), metaChangeFile1.getName());
    // check snapshot copy of metaChangeFile1
    INodeFile metaChangeFile1SCopy = child.asFile();
    assertTrue(metaChangeFile1SCopy.isWithSnapshot());
    assertFalse(metaChangeFile1SCopy.isUnderConstruction());
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
Also used : Path(org.apache.hadoop.fs.Path) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)
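The block-unlinking check repeated twice in this test can be distilled into a small helper. This is a hedged sketch: assertBlocksUnlinked is a hypothetical name, and the test's INVALID_INODE_ID constant is assumed to be a static import of INodeId.INVALID_INODE_ID:

// Hypothetical helper: after a file is deleted and no snapshot still references
// it, each of its blocks should no longer point back to an INode.
private static void assertBlocksUnlinked(BlockInfo[] blocks) {
    for (BlockInfo b : blocks) {
        assertEquals(INodeId.INVALID_INODE_ID, b.getBlockCollectionId());
    }
}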

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84 uses
Test (org.junit.Test): 28 uses
Path (org.apache.hadoop.fs.Path): 27 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 22 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16 uses
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13 uses
IOException (java.io.IOException): 11 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11 uses
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11 uses
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5 uses