Example 16 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestReconstructStripedBlocksWithRackAwareness, method testReconstructForNotEnoughRacks.

/**
   * When all the internal blocks are available but not placed on enough
   * racks, the NameNode should avoid normal decoding reconstruction and
   * instead copy an internal block to a new rack.
   *
   * In this test, we first create a scenario in which a striped block has
   * all of its internal blocks but they are distributed across fewer than 6
   * racks. Then we check whether the redundancy monitor correctly schedules
   * the reconstruction work for it.
   */
@Test
public void testReconstructForNotEnoughRacks() throws Exception {
    LOG.info("cluster hosts: {}, racks: {}", Arrays.asList(hosts), Arrays.asList(racks));
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts).numDataNodes(hosts.length).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    fs.setErasureCodingPolicy(new Path("/"), StripedFileTestUtil.getDefaultECPolicy().getName());
    FSNamesystem fsn = cluster.getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(hosts[hosts.length - 1]);
    final Path file = new Path("/foo");
    // the file's block group is stored on 9 datanodes but only 5 racks
    DFSTestUtil.createFile(fs, file, cellSize * dataBlocks * 2, (short) 1, 0L);
    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
    final INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file.toString()).asFile();
    BlockInfoStriped blockInfo = (BlockInfoStriped) fileNode.getLastBlock();
    // we should now have 9 internal blocks distributed across 5 racks
    Set<String> rackSet = new HashSet<>();
    for (DatanodeStorageInfo storage : blockInfo.storages) {
        rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
    }
    Assert.assertEquals(dataBlocks - 1, rackSet.size());
    // restart the stopped datanode
    cluster.restartDataNode(lastHost);
    cluster.waitActive();
    // make sure we have 6 racks again
    NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
    Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
    Assert.assertEquals(dataBlocks, topology.getNumOfRacks());
    // pause all the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    fsn.writeLock();
    try {
        bm.processMisReplicatedBlocks();
    } finally {
        fsn.writeUnlock();
    }
    // check whether the redundancy monitor correctly schedules the reconstruction work.
    boolean scheduled = false;
    for (int i = 0; i < 5; i++) {
        // retry 5 times
        for (DatanodeStorageInfo storage : blockInfo.storages) {
            if (storage != null) {
                DatanodeDescriptor dn = storage.getDatanodeDescriptor();
                Assert.assertEquals(0, dn.getNumberOfBlocksToBeErasureCoded());
                if (dn.getNumberOfBlocksToBeReplicated() == 1) {
                    scheduled = true;
                }
            }
        }
        if (scheduled) {
            break;
        }
        Thread.sleep(1000);
    }
    Assert.assertTrue(scheduled);
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), NetworkTopology (org.apache.hadoop.net.NetworkTopology), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), HashSet (java.util.HashSet), Test (org.junit.Test)
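
The hand-rolled sleep-and-retry loop at the end of this test can be expressed more compactly with GenericTestUtils.waitFor. A minimal sketch of just the polling portion, assuming the Supplier-based waitFor overload from org.apache.hadoop.test.GenericTestUtils is available in this tree (note that waitFor throws a TimeoutException on failure instead of tripping the final assertion):

import org.apache.hadoop.test.GenericTestUtils;

// Succeed as soon as any datanode holding an internal block has exactly one
// replication task scheduled; check every second, give up after 5 seconds.
GenericTestUtils.waitFor(() -> {
    for (DatanodeStorageInfo storage : blockInfo.storages) {
        if (storage != null && storage.getDatanodeDescriptor()
            .getNumberOfBlocksToBeReplicated() == 1) {
            return true;
        }
    }
    return false;
}, 1000, 5000);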

Example 17 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestReplicationPolicy, method testAddStoredBlockDoesNotCauseSkippedReplication.

@Test(timeout = 60000)
public void testAddStoredBlockDoesNotCauseSkippedReplication() throws IOException {
    FSNamesystem mockNS = mock(FSNamesystem.class);
    when(mockNS.hasWriteLock()).thenReturn(true);
    when(mockNS.hasReadLock()).thenReturn(true);
    BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
    LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
    BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    // Adding QUEUE_LOW_REDUNDANCY block
    lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
    // Adding QUEUE_LOW_REDUNDANCY block
    lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
    List<List<BlockInfo>> chosenBlocks;
    // Choose 1 block from lowRedundancyBlocks; per the assertion below, it
    // should come from the highest-priority level (priority 0).
    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
    // Add this block collection to the BlockManager so that, when we add the
    // block under construction, the BlockManager realizes the expected
    // replication has been achieved and removes the block from the low
    // redundancy queue.
    BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
    info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
    info.setBlockCollectionId(1000L);
    final INodeFile file = TestINodeFile.createINodeFile(1000L);
    when(mockNS.getBlockCollection(1000L)).thenReturn(file);
    bm.addBlockCollection(info, file);
    // Adding this block will increase its current replication, and that will
    // remove it from the queue.
    bm.addStoredBlockUnderConstruction(new StatefulBlockInfo(info, info, ReplicaState.FINALIZED), storages[0]);
    // Choose 1 block from lowRedundancyBlocks again. The remaining block still
    // sits at the highest-priority level and should not be skipped over.
    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
Also used: StatefulBlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo), List (java.util.List), ArrayList (java.util.ArrayList), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
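
The assertTheChosenBlocks helper is defined elsewhere in TestReplicationPolicy and is not shown in this snippet. A plausible reconstruction, stated as an assumption rather than the actual Hadoop source, is that it compares the number of blocks chosen at each priority level against the expected counts:

// Hypothetical sketch of the helper: expectedSizes[i] is the number of
// blocks expected from priority level i; levels beyond the returned list
// are treated as empty.
private void assertTheChosenBlocks(List<List<BlockInfo>> chosenBlocks,
        int... expectedSizes) {
    for (int i = 0; i < expectedSizes.length; i++) {
        int actual = i < chosenBlocks.size() ? chosenBlocks.get(i).size() : 0;
        Assert.assertEquals("unexpected block count at priority " + i,
                expectedSizes[i], actual);
    }
}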

Example 18 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestFileWithSnapshotFeature, method testUpdateQuotaAndCollectBlocks.

@Test
public void testUpdateQuotaAndCollectBlocks() {
    FileDiffList diffs = new FileDiffList();
    FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
    FileDiff diff = mock(FileDiff.class);
    BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
    BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
    BlockInfo[] blocks = new BlockInfo[] { new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1) };
    BlockManager bm = mock(BlockManager.class);
    // No snapshot
    INodeFile file = mock(INodeFile.class);
    when(file.getFileWithSnapshotFeature()).thenReturn(sf);
    when(file.getBlocks()).thenReturn(blocks);
    when(file.getStoragePolicyID()).thenReturn((byte) 1);
    Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
    when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
    when(bsps.getPolicy(anyByte())).thenReturn(bsp);
    INode.BlocksMapUpdateInfo collectedBlocks = mock(INode.BlocksMapUpdateInfo.class);
    ArrayList<INode> removedINodes = new ArrayList<>();
    INode.ReclaimContext ctx = new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals(0, counts.getStorageSpace());
    Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
    // INode only exists in the snapshot
    INodeFile snapshotINode = mock(INodeFile.class);
    Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
    Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
    when(diff.getSnapshotINode()).thenReturn(snapshotINode);
    when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
    when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
    blocks[0].setReplication(REPL_3);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE, counts.getStorageSpace());
    Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
    Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
Also used: BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite), BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), INode (org.apache.hadoop.hdfs.server.namenode.INode), ArrayList (java.util.ArrayList), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), Block (org.apache.hadoop.hdfs.protocol.Block), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts), Test (org.junit.Test)
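
The Whitebox.setInternalState calls above poke the packed INodeFile header directly: shifting the replication factor left by 48 bits places it above the preferred-block-size field. A minimal sketch of that encoding, with the exact field widths stated as assumptions (the authoritative layout is INodeFile.HeaderFormat):

// Assumed header packing behind "(long) REPL << 48": the low 48 bits hold
// the preferred block size, and a 12-bit replication field sits above them.
static long packHeader(long preferredBlockSize, short replication) {
    return ((long) replication << 48) | (preferredBlockSize & ((1L << 48) - 1));
}

static short unpackReplication(long header) {
    return (short) ((header >>> 48) & 0xFFF); // assumed 12-bit field
}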

Example 19 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestINodeFileUnderConstructionWithSnapshot, method testSnapshotWhileAppending.

/**
   * Test taking a snapshot during a file append, before the corresponding
   * {@link FSDataOutputStream} instance is closed.
   */
@Test(timeout = 60000)
public void testSnapshotWhileAppending() throws Exception {
    Path file = new Path(dir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    // 1. append without closing stream --> create snapshot
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    out.close();
    // check: an INodeFileUnderConstructionWithSnapshot should be stored in s0's
    // deleted list, with size BLOCKSIZE*2
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
    INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
    DirectoryDiff last = dirNode.getDiffs().getLast();
    // 2. append without closing stream
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // re-check nodeInDeleted_S0
    dirNode = fsdir.getINode(dir.toString()).asDirectory();
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
    // 3. take snapshot --> close stream
    hdfs.createSnapshot(dir, "s1");
    out.close();
    // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
    // have been stored in s1's deleted list
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    dirNode = fsdir.getINode(dir.toString()).asDirectory();
    last = dirNode.getDiffs().getLast();
    assertTrue(fileNode.isWithSnapshot());
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s2");
    out.close();
    // re-check the size of nodeInDeleted_S1
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
}
Also used: Path (org.apache.hadoop.fs.Path), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
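
appendFileWithoutClosing is a helper defined elsewhere in the test class. A plausible sketch, assuming it appends length random bytes and hands back the still-open stream so a snapshot can be taken while the file is under construction:

import java.io.IOException;
import java.util.Random;

// Hypothetical sketch of the helper: append random bytes and return the
// stream without closing it, leaving the last block under construction.
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
        throws IOException {
    byte[] toAppend = new byte[length];
    new Random().nextBytes(toAppend);
    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
    out.write(toAppend);
    return out; // caller decides when to hsync() or close()
}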

Example 20 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameMoreThanOnceAcrossSnapDirs_2.

/**
   * Test renaming a dir multiple times across snapshottable directories:
   * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
   *
   * Create snapshots after each rename.
   */
@Test
public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path sdir3 = new Path("/dir3");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    hdfs.mkdirs(sdir3);
    final Path foo_dir1 = new Path(sdir1, "foo");
    final Path bar1_dir1 = new Path(foo_dir1, "bar1");
    final Path bar_dir1 = new Path(sdir1, "bar");
    DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
    DFSTestUtil.createFile(hdfs, bar_dir1, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");
    // 1. /dir1/foo -> /dir2/foo, /dir1/bar -> /dir2/bar
    final Path foo_dir2 = new Path(sdir2, "foo");
    hdfs.rename(foo_dir1, foo_dir2);
    final Path bar_dir2 = new Path(sdir2, "bar");
    hdfs.rename(bar_dir1, bar_dir2);
    // modification on /dir2/foo and /dir2/bar
    final Path bar1_dir2 = new Path(foo_dir2, "bar1");
    hdfs.setReplication(bar1_dir2, REPL_1);
    hdfs.setReplication(bar_dir2, REPL_1);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // create snapshots
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s22");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s33");
    // 2. /dir2/foo -> /dir3/foo
    final Path foo_dir3 = new Path(sdir3, "foo");
    hdfs.rename(foo_dir2, foo_dir3);
    final Path bar_dir3 = new Path(sdir3, "bar");
    hdfs.rename(bar_dir2, bar_dir3);
    // modification on /dir3/foo
    final Path bar1_dir3 = new Path(foo_dir3, "bar1");
    hdfs.setReplication(bar1_dir3, REPL_2);
    hdfs.setReplication(bar_dir3, REPL_2);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // create snapshots
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s222");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s333");
    // check
    final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo/bar1");
    final Path bar1_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22", "foo/bar1");
    final Path bar1_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333", "foo/bar1");
    final Path bar_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "bar");
    final Path bar_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22", "bar");
    final Path bar_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar1_s22));
    assertTrue(hdfs.exists(bar1_s333));
    assertTrue(hdfs.exists(bar_s1));
    assertTrue(hdfs.exists(bar_s22));
    assertTrue(hdfs.exists(bar_s333));
    FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir3);
    assertEquals(REPL_2, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s22);
    assertEquals(REPL_1, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s333);
    assertEquals(REPL_2, statusBar1.getReplication());
    FileStatus statusBar = hdfs.getFileStatus(bar_s1);
    assertEquals(REPL, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_dir3);
    assertEquals(REPL_2, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s22);
    assertEquals(REPL_1, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s333);
    assertEquals(REPL_2, statusBar.getReplication());
    // 3. /dir3/foo -> /dir2/foo
    hdfs.rename(foo_dir3, foo_dir2);
    hdfs.rename(bar_dir3, bar_dir2);
    // modification on /dir2/foo
    hdfs.setReplication(bar1_dir2, REPL);
    hdfs.setReplication(bar_dir2, REPL);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // create snapshots
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2222");
    // check
    final Path bar1_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "foo/bar1");
    final Path bar_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar1_s22));
    assertTrue(hdfs.exists(bar1_s333));
    assertTrue(hdfs.exists(bar1_s2222));
    assertTrue(hdfs.exists(bar_s1));
    assertTrue(hdfs.exists(bar_s22));
    assertTrue(hdfs.exists(bar_s333));
    assertTrue(hdfs.exists(bar_s2222));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir2);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s22);
    assertEquals(REPL_1, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s333);
    assertEquals(REPL_2, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s2222);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar = hdfs.getFileStatus(bar_s1);
    assertEquals(REPL, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_dir2);
    assertEquals(REPL, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s22);
    assertEquals(REPL_1, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s333);
    assertEquals(REPL_2, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s2222);
    assertEquals(REPL, statusBar.getReplication());
    // 4. /dir2/foo -> /dir1/foo
    hdfs.rename(foo_dir2, foo_dir1);
    hdfs.rename(bar_dir2, bar_dir1);
    // check the internal details
    INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    INodeDirectory sdir3Node = fsdir.getINode(sdir3.toString()).asDirectory();
    INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString()).asReference();
    INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
    // 5 references: s1, s22, s333, s2222, current tree of sdir1
    assertEquals(5, fooWithCount.getReferenceCount());
    INodeDirectory foo = fooWithCount.asDirectory();
    List<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
    assertEquals(4, fooDiffs.size());
    Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
    Snapshot s333 = sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
    Snapshot s22 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
    Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
    assertEquals(s333.getId(), fooDiffs.get(2).getSnapshotId());
    assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId());
    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
    INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
    List<FileDiff> bar1Diffs = bar1.getDiffs().asList();
    assertEquals(3, bar1Diffs.size());
    assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
    assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId());
    assertEquals(s1.getId(), bar1Diffs.get(0).getSnapshotId());
    INodeReference barRef = fsdir.getINode4Write(bar_dir1.toString()).asReference();
    INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
    // 5 references: s1, s22, s333, s2222, current tree of sdir1
    assertEquals(5, barWithCount.getReferenceCount());
    INodeFile bar = barWithCount.asFile();
    List<FileDiff> barDiffs = bar.getDiffs().asList();
    assertEquals(4, barDiffs.size());
    assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
    assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId());
    assertEquals(s22.getId(), barDiffs.get(1).getSnapshotId());
    assertEquals(s1.getId(), barDiffs.get(0).getSnapshotId());
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // delete foo
    hdfs.delete(foo_dir1, true);
    hdfs.delete(bar_dir1, true);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // check
    final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111", "foo/bar1");
    final Path bar_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar1_s22));
    assertTrue(hdfs.exists(bar1_s333));
    assertTrue(hdfs.exists(bar1_s2222));
    assertFalse(hdfs.exists(bar1_s1111));
    assertTrue(hdfs.exists(bar_s1));
    assertTrue(hdfs.exists(bar_s22));
    assertTrue(hdfs.exists(bar_s333));
    assertTrue(hdfs.exists(bar_s2222));
    assertFalse(hdfs.exists(bar_s1111));
    final Path foo_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "foo");
    fooRef = fsdir.getINode(foo_s2222.toString()).asReference();
    fooWithCount = (WithCount) fooRef.getReferredINode();
    assertEquals(4, fooWithCount.getReferenceCount());
    foo = fooWithCount.asDirectory();
    fooDiffs = foo.getDiffs().asList();
    assertEquals(4, fooDiffs.size());
    assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
    bar1Diffs = bar1.getDiffs().asList();
    assertEquals(3, bar1Diffs.size());
    assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
    barRef = fsdir.getINode(bar_s2222.toString()).asReference();
    barWithCount = (WithCount) barRef.getReferredINode();
    assertEquals(4, barWithCount.getReferenceCount());
    bar = barWithCount.asFile();
    barDiffs = bar.getDiffs().asList();
    assertEquals(4, barDiffs.size());
    assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff), FileStatus (org.apache.hadoop.fs.FileStatus), WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount), INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
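
For reference, the snapshot paths checked above resolve through HDFS's reserved .snapshot directory, so SnapshotTestHelper.getSnapshotPath can be written out by hand. A small equivalence sketch (the helper's exact behavior is assumed here):

// Snapshot paths live under the reserved ".snapshot" subdirectory of the
// snapshottable directory, so these two should name the same file.
Path viaHelper = SnapshotTestHelper.getSnapshotPath(
        new Path("/dir1"), "s1", "foo/bar1");
Path byHand = new Path("/dir1/.snapshot/s1/foo/bar1");
assertEquals(byHand, viaHelper);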

Aggregations

INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 35
Path (org.apache.hadoop.fs.Path): 24
Test (org.junit.Test): 23
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 14
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 10
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 9
INode (org.apache.hadoop.hdfs.server.namenode.INode): 7
FileStatus (org.apache.hadoop.fs.FileStatus): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 5
TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile): 5
IOException (java.io.IOException): 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 4
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 4
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 3
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 3