Example 6 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestSnapshotDeletion, method testDeleteCurrentFileDirectory.

/**
   * Test deleting a directory which is a descendant of a snapshottable
   * directory. In the test we need to cover the following cases:
   * 
   * <pre>
   * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
   * 2. Delete current INodeFile/INodeDirectory after snapshots have been
   *    taken on ancestor(s).
   * 3. Delete current INodeFileWithSnapshot.
   * 4. Delete current INodeDirectoryWithSnapshot.
   * </pre>
   */
@Test(timeout = 300000)
public void testDeleteCurrentFileDirectory() throws Exception {
    // create a folder which will be deleted before taking snapshots
    Path deleteDir = new Path(subsub, "deleteDir");
    Path deleteFile = new Path(deleteDir, "deleteFile");
    // create a directory that we will not change during the whole process.
    Path noChangeDirParent = new Path(sub, "noChangeDirParent");
    Path noChangeDir = new Path(noChangeDirParent, "noChangeDir");
    // create a file that we will not change in the future
    Path noChangeFile = new Path(noChangeDir, "noChangeFile");
    DFSTestUtil.createFile(hdfs, deleteFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    // we will change this file's metadata in the future
    Path metaChangeFile1 = new Path(subsub, "metaChangeFile1");
    DFSTestUtil.createFile(hdfs, metaChangeFile1, BLOCKSIZE, REPLICATION, seed);
    // another file, created under noChangeDir, whose metadata will be changed
    Path metaChangeFile2 = new Path(noChangeDir, "metaChangeFile2");
    DFSTestUtil.createFile(hdfs, metaChangeFile2, BLOCKSIZE, REPLICATION, seed);
    // Case 1: delete deleteDir before taking snapshots
    hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    checkQuotaUsageComputation(dir, 10, BLOCKSIZE * REPLICATION * 4);
    hdfs.delete(deleteDir, true);
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    // create snapshot s0
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    // after creating snapshot s0, create a directory tempdir under dir and
    // then delete tempdir immediately
    Path tempDir = new Path(dir, "tempdir");
    Path tempFile = new Path(tempDir, "tempfile");
    DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = temp.getBlocks();
    hdfs.delete(tempDir, true);
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    // check blocks of tempFile
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // make a change: create a new file under subsub
    Path newFileAfterS0 = new Path(subsub, "newFile");
    DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
    // further change: lower the replication factor of metaChangeFile1 and
    // metaChangeFile2
    hdfs.setReplication(metaChangeFile1, REPLICATION_1);
    hdfs.setReplication(metaChangeFile2, REPLICATION_1);
    // create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // get two snapshots for later use
    Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
    Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
    // Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
    // metaChangeFile2. Note that a directory deleted directly is first
    // converted to an INodeDirectoryWithSnapshot. To make sure the deletion
    // also exercises a plain INodeDirectory (noChangeDir), we delete its
    // parent, noChangeDirParent, instead
    hdfs.delete(noChangeDirParent, true);
    // during the deletion, a diff is added for metaChangeFile2 as its snapshot
    // copy for s1; diffs are also added for both sub and noChangeDirParent
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // check the snapshot copy of noChangeDir 
    Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName() + "/" + noChangeDirParent.getName() + "/" + noChangeDir.getName());
    INodeDirectory snapshotNode = (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
    // should still be an INodeDirectory
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
    // check 2 children: noChangeFile and metaChangeFile2
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    INodeFile metaChangeFile2SCopy = children.get(0).asFile();
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertTrue(metaChangeFile2SCopy.isWithSnapshot());
    assertFalse(metaChangeFile2SCopy.isUnderConstruction());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    // check the replication factor of metaChangeFile2SCopy
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
    // Case 4: delete directory sub
    // before deleting sub, we first create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 10L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    // during the deletion, diffs are added for subsub and metaChangeFile1,
    // and newFile is removed
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // make sure the whole subtree of sub is stored correctly in snapshot
    Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName());
    INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString()).asDirectory();
    assertTrue(snapshotNode4Sub.isWithSnapshot());
    // the snapshot copy of sub has only one child subsub.
    // newFile should have been destroyed
    assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
    // but it should have two children, subsub and noChangeDirParent, when s1
    // was taken
    assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
    // check the snapshot copy of subsub, which is contained in the subtree of
    // sub's snapshot copy
    INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
    assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
    assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
    // check the children of subsub
    INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
    children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(2, children.size());
    assertEquals(metaChangeFile1.getName(), children.get(0).getLocalName());
    assertEquals(newFileAfterS0.getName(), children.get(1).getLocalName());
    // subsub had only one child when snapshot s0 was taken
    children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
    assertEquals(1, children.size());
    INode child = children.get(0);
    assertEquals(metaChangeFile1.getName(), child.getLocalName());
    // check snapshot copy of metaChangeFile1
    INodeFile metaChangeFile1SCopy = child.asFile();
    assertTrue(metaChangeFile1SCopy.isWithSnapshot());
    assertFalse(metaChangeFile1SCopy.isUnderConstruction());
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
Also used: Path(org.apache.hadoop.fs.Path), INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INode(org.apache.hadoop.hdfs.server.namenode.INode), BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile), Test(org.junit.Test)
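
A note on the helper used throughout this example: checkQuotaUsageComputation is a private method of TestSnapshotDeletion that is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming a quota has been set on the directory (as done via hdfs.setQuota above); the QuotaCounts-based assertions are an assumption about the real implementation, not a copy of it.

// Hedged sketch: verify that the quota usage cached on the directory matches
// the expected namespace count and disk-space consumption.
private void checkQuotaUsageComputation(final Path dirPath, final long expectedNs, final long expectedDs) throws IOException {
    INodeDirectory dirNode = fsdir.getINode(dirPath.toString()).asDirectory();
    // hdfs.setQuota must have been called on this directory first
    assertTrue(dirNode.isQuotaSet());
    QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(expectedNs, q.getNameSpace());
    assertEquals(expectedDs, q.getStorageSpace());
}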

Example 7 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestSnapshotReplication, method checkFileReplication.

/**
   * Check the replication of a given file.
   *
   * @param file The given file
   * @param replication The expected replication number
   * @param blockReplication The expected replication number for the block
   * @throws Exception
   */
private void checkFileReplication(Path file, short replication, short blockReplication) throws Exception {
    // Get the FileStatus of the given file. Note that the replication
    // number in FileStatus is derived from INodeFile#getFileReplication().
    short fileReplication = hdfs.getFileStatus(file).getReplication();
    assertEquals(replication, fileReplication);
    // Check the replication recorded on each block of the file
    INode inode = fsdir.getINode(file.toString());
    assertTrue(inode instanceof INodeFile);
    for (BlockInfo b : inode.asFile().getBlocks()) {
        assertEquals(blockReplication, b.getReplication());
    }
}
Also used: INode(org.apache.hadoop.hdfs.server.namenode.INode), BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
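
A hypothetical call site for this helper. It assumes file1 was created with replication 3 and a snapshot captured it before the change; the block-level replication is expected to stay at the maximum across the current file and its snapshot copies:

// lower the current replication after a snapshot captured it at 3
hdfs.setReplication(file1, (short) 1);
// file metadata now reports 1, but the blocks are still replicated 3 times
checkFileReplication(file1, (short) 1, (short) 3);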

Example 8 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestPendingReconstruction, method testBlockReceived.

/**
   * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
   * pending reconstruction. Also make sure the blockReceivedAndDeleted call
   * is idempotent with respect to the pending reconstruction.
   */
@Test
public void testBlockReceived() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        FSNamesystem fsn = cluster.getNamesystem();
        BlockManager blkManager = fsn.getBlockManager();
        final String file = "/tmp.txt";
        final Path filePath = new Path(file);
        short replFactor = 1;
        DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
        // temporarily stop the heartbeat
        ArrayList<DataNode> datanodes = cluster.getDataNodes();
        for (int i = 0; i < DATANODE_COUNT; i++) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
        }
        hdfs.setReplication(filePath, (short) DATANODE_COUNT);
        BlockManagerTestUtil.computeAllPendingWork(blkManager);
        assertEquals(1, blkManager.pendingReconstruction.size());
        INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
        BlockInfo[] blocks = fileNode.getBlocks();
        assertEquals(DATANODE_COUNT - 1, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0).get(0);
        DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
        int reportDnNum = 0;
        String poolId = cluster.getNamesystem().getBlockPoolId();
        // report to NN
        for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
            if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
                DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(new DatanodeStorage("Fake-storage-ID-Ignored"), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        // IBRs are async, make sure the NN processes all of them.
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertEquals(DATANODE_COUNT - 3, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        // let the same datanodes report again; blockReceivedAndDeleted should
        // be idempotent, so the pending count must stay the same
        reportDnNum = 0;
        for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
            if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
                DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(new DatanodeStorage("Fake-storage-ID-Ignored"), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertEquals(DATANODE_COUNT - 3, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        // re-enable heartbeats on all datanodes and trigger them
        for (int i = 0; i < DATANODE_COUNT; i++) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), false);
            DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
        }
        // give the NN time to process the heartbeats and clear the queue
        Thread.sleep(5000);
        assertEquals(0, blkManager.pendingReconstruction.size());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile), DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test(org.junit.Test)
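
The two report loops in this test are identical. Below is a sketch of the same logic pulled into a helper; the method name is hypothetical, but every call inside is taken from the loop bodies above:

// Send a RECEIVED_BLOCK incremental block report for the given block from up
// to two datanodes other than the one that already holds a replica.
private static int reportReceivedBlock(MiniDFSCluster cluster, ArrayList<DataNode> datanodes, DatanodeInfo existingDn, String poolId, BlockInfo block) throws IOException {
    int reportDnNum = 0;
    for (int i = 0; i < datanodes.size() && reportDnNum < 2; i++) {
        if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
            DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
            StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(new DatanodeStorage("Fake-storage-ID-Ignored"), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(block, BlockStatus.RECEIVED_BLOCK, "") }) };
            cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
            reportDnNum++;
        }
    }
    return reportDnNum;
}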

Example 9 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameFileAndDeleteSnapshot.

/**
   * Test renaming a file and then deleting snapshots.
   */
@Test
public void testRenameFileAndDeleteSnapshot() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir2, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    hdfs.createSnapshot(sdir1, "s3");
    final Path newfoo = new Path(sdir1, "foo");
    hdfs.rename(foo, newfoo);
    hdfs.setReplication(newfoo, REPL_1);
    hdfs.createSnapshot(sdir1, "s4");
    hdfs.setReplication(newfoo, REPL_2);
    FileStatus status = hdfs.getFileStatus(newfoo);
    assertEquals(REPL_2, status.getReplication());
    final Path foo_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo");
    status = hdfs.getFileStatus(foo_s4);
    assertEquals(REPL_1, status.getReplication());
    hdfs.createSnapshot(sdir1, "s5");
    final Path foo_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5", "foo");
    status = hdfs.getFileStatus(foo_s5);
    assertEquals(REPL_2, status.getReplication());
    // delete snapshot s5.
    hdfs.deleteSnapshot(sdir1, "s5");
    restartClusterAndCheckImage(true);
    assertFalse(hdfs.exists(foo_s5));
    status = hdfs.getFileStatus(foo_s4);
    assertEquals(REPL_1, status.getReplication());
    // delete snapshot s4.
    hdfs.deleteSnapshot(sdir1, "s4");
    assertFalse(hdfs.exists(foo_s4));
    Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
    assertTrue(hdfs.exists(foo_s2));
    status = hdfs.getFileStatus(foo_s2);
    assertEquals(REPL, status.getReplication());
    INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
    assertEquals(1, snode.getDiffs().asList().size());
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
    // restart cluster
    restartClusterAndCheckImage(true);
    // delete snapshot s2.
    hdfs.deleteSnapshot(sdir2, "s2");
    assertFalse(hdfs.exists(foo_s2));
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    hdfs.deleteSnapshot(sdir1, "s3");
    restartClusterAndCheckImage(true);
    hdfs.deleteSnapshot(sdir1, "s1");
    restartClusterAndCheckImage(true);
}
Also used: Path(org.apache.hadoop.fs.Path), INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory), FileStatus(org.apache.hadoop.fs.FileStatus), INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile), Test(org.junit.Test)
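
restartClusterAndCheckImage is a helper of TestRenameWithSnapshots that is not shown on this page; the real method also compares quota usage when its boolean argument is true. Below is a minimal sketch of the core idea, assuming the test keeps cluster and hdfs fields; the dump-and-compare approach is an assumption about the real helper:

// Hedged sketch: persist a fresh fsimage, restart the NameNode, and check
// that the namespace loaded back from the image matches the previous one.
private void restartAndCheckImage() throws IOException {
    String before = cluster.getNamesystem().getFSDirectory().getINode("/").dumpTreeRecursively().toString();
    // enter safe mode and save the current namespace into a new fsimage
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNode();
    String after = cluster.getNamesystem().getFSDirectory().getINode("/").dumpTreeRecursively().toString();
    assertEquals(before, after);
}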

Example 10 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestINodeFileUnderConstructionWithSnapshot, method testSnapshotAfterAppending.

/**
   * Test taking snapshots after appending to a file.
   */
@Test(timeout = 60000)
public void testSnapshotAfterAppending() throws Exception {
    Path file = new Path(dir, "file");
    // 1. create snapshot --> create file --> append
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    // 2. create snapshot --> modify the file --> append
    hdfs.createSnapshot(dir, "s1");
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1, fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize());
    // 3. create snapshot --> append
    hdfs.createSnapshot(dir, "s2");
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1, fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 4, fileNode.computeFileSize());
}
Also used: Path(org.apache.hadoop.fs.Path), INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile), Test(org.junit.Test)
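
Because each snapshot freezes the file length at the moment it is taken, the snapshot paths can be checked directly. A hedged follow-up one could append to this test, using the same getSnapshotPath convention as the other examples on this page:

// s1 was taken after one create plus one append: two blocks
Path fileInS1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", "file");
assertEquals(BLOCKSIZE * 2, hdfs.getFileStatus(fileInS1).getLen());
// s2 was taken after one further append: three blocks
Path fileInS2 = SnapshotTestHelper.getSnapshotPath(dir, "s2", "file");
assertEquals(BLOCKSIZE * 3, hdfs.getFileStatus(fileInS2).getLen());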

Aggregations

INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 35
Path (org.apache.hadoop.fs.Path): 24
Test (org.junit.Test): 23
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 14
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 10
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 9
INode (org.apache.hadoop.hdfs.server.namenode.INode): 7
FileStatus (org.apache.hadoop.fs.FileStatus): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 5
TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile): 5
IOException (java.io.IOException): 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 4
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 4
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 3
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 3