
Example 31 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class TestRenameWithSnapshots method testRenameDirAndDeleteSnapshot_6.

/**
   * Rename and snapshot deletion under the same snapshottable directory.
   */
@Test
public void testRenameDirAndDeleteSnapshot_6() throws Exception {
    final Path test = new Path("/test");
    final Path dir1 = new Path(test, "dir1");
    final Path dir2 = new Path(test, "dir2");
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(dir2);
    final Path foo = new Path(dir2, "foo");
    final Path bar = new Path(foo, "bar");
    final Path file = new Path(bar, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
    // take a snapshot on /test
    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
    // delete /test/dir2/foo/bar/file after snapshot s0, so that there is a 
    // snapshot copy recorded in bar
    hdfs.delete(file, true);
    // rename foo from dir2 to dir1
    final Path newfoo = new Path(dir1, foo.getName());
    hdfs.rename(foo, newfoo);
    final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0", "dir2/foo");
    assertTrue("the snapshot path " + foo_s0 + " should exist", hdfs.exists(foo_s0));
    // delete snapshot s0. The deletion will first go down through dir1, and 
    // find foo in the created list of dir1. Then it will use null as the prior
    // snapshot and continue the snapshot deletion process in the subtree of 
    // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
    // foo subtree.
    hdfs.deleteSnapshot(test, "s0");
    // check the internal details
    assertFalse("after deleting s0, " + foo_s0 + " should not exist", hdfs.exists(foo_s0));
    INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
    assertTrue("the diff list of " + dir2 + " should be empty after deleting s0", dir2Node.getDiffs().asList().isEmpty());
    assertTrue(hdfs.exists(newfoo));
    INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
    assertTrue(fooRefNode instanceof INodeReference.DstReference);
    INodeDirectory fooNode = fooRefNode.asDirectory();
    // fooNode should still be an INodeDirectory (With Snapshot) since we call
    // recordModification before the rename
    assertTrue(fooNode.isWithSnapshot());
    assertTrue(fooNode.getDiffs().asList().isEmpty());
    INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0).asDirectory();
    // bar should also be an INodeDirectory (With Snapshot); both its diff
    // list and children list should be empty
    assertTrue(barNode.getDiffs().asList().isEmpty());
    assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
    restartClusterAndCheckImage(true);
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) Test(org.junit.Test)
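
The flow this test drives through NameNode internals can also be reproduced with the public DistributedFileSystem API alone. Below is a minimal sketch of the same rename-then-delete-snapshot sequence, assuming hdfs is a DistributedFileSystem handle to a running cluster (as in the test fixture); the paths are illustrative.

// A minimal sketch (not part of the test class) of the user-visible flow
// exercised above, assuming hdfs is a DistributedFileSystem connected to a
// running cluster.
Path test = new Path("/test");
hdfs.mkdirs(new Path(test, "dir1"));
hdfs.mkdirs(new Path(test, "dir2/foo/bar"));
// Mark /test snapshottable, then take snapshot s0 (SnapshotTestHelper
// performs both steps in one call).
hdfs.allowSnapshot(test);
hdfs.createSnapshot(test, "s0");
// Changes made after s0 do not affect the read-only copy under .snapshot/s0.
hdfs.rename(new Path(test, "dir2/foo"), new Path(test, "dir1/foo"));
assertTrue(hdfs.exists(new Path(test, ".snapshot/s0/dir2/foo")));
// Deleting the snapshot removes that copy.
hdfs.deleteSnapshot(test, "s0");
assertFalse(hdfs.exists(new Path(test, ".snapshot/s0/dir2/foo")));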

Example 32 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class TestRenameWithSnapshots method testRenameUndo_3.

/**
   * Test the undo logic when a directory that has already been renamed once
   * is renamed again and the second rename fails.
   */
@Test
public void testRenameUndo_3() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path sdir3 = new Path("/dir3");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    hdfs.mkdirs(sdir3);
    final Path foo = new Path(sdir1, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
    INodeDirectory mockDir3 = spy(dir3);
    doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(), Mockito.anyInt());
    INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
    root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
    final Path foo_dir2 = new Path(sdir2, "foo2");
    final Path foo_dir3 = new Path(sdir3, "foo3");
    hdfs.rename(foo, foo_dir2);
    boolean result = hdfs.rename(foo_dir2, foo_dir3);
    assertFalse(result);
    // check the current internal details
    INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString()).asDirectory();
    Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
    INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
    Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(1, dir2Children.size());
    List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
    assertEquals(1, dir2Diffs.size());
    assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
    ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
    assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
    assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo2");
    assertFalse(hdfs.exists(foo_s2));
    INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
    assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
    assertTrue(fooNode instanceof INodeReference.DstReference);
    List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
    assertEquals(1, fooDiffs.size());
    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
    // create snapshot on sdir2 and rename again
    hdfs.createSnapshot(sdir2, "s3");
    result = hdfs.rename(foo_dir2, foo_dir3);
    assertFalse(result);
    // check internal details again
    dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
    Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
    fooNode = fsdir.getINode4Write(foo_dir2.toString());
    dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(1, dir2Children.size());
    dir2Diffs = dir2Node.getDiffs().asList();
    assertEquals(2, dir2Diffs.size());
    assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
    assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
    childrenDiff = dir2Diffs.get(0).getChildrenDiff();
    assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
    assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
    assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
    childrenDiff = dir2Diffs.get(1).getChildrenDiff();
    assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
    assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
    final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo2");
    assertFalse(hdfs.exists(foo_s2));
    assertTrue(hdfs.exists(foo_s3));
    assertTrue(fooNode instanceof INodeReference.DstReference);
    fooDiffs = fooNode.asDirectory().getDiffs().asList();
    assertEquals(2, fooDiffs.size());
    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
    assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) ChildrenDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) Test(org.junit.Test)
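
The failure injection is the heart of this test: a Mockito spy replaces the real dir3 node so that addChild reports failure and forces each rename into its undo path. A reduced sketch of just that pattern follows, using qualified (non-deprecated) matchers; apart from the fsdir/hdfs fixtures, the names and paths are illustrative.

// Spy the real directory node, force addChild to report failure, and splice
// the spy into the namespace tree in place of the original.
INodeDirectory realDir3 = fsdir.getINode4Write("/dir3").asDirectory();
INodeDirectory failingDir3 = Mockito.spy(realDir3);
Mockito.doReturn(false).when(failingDir3)
        .addChild(Mockito.any(INode.class), Mockito.anyBoolean(), Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(realDir3, failingDir3, fsdir.getINodeMap());
// Any rename into /dir3 now fails at addChild and must be rolled back,
// leaving the source path intact.
assertFalse(hdfs.rename(new Path("/dir2/foo2"), new Path("/dir3/foo3")));
assertTrue(hdfs.exists(new Path("/dir2/foo2")));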

Example 33 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class TestSnapshotManager method testSnapshotLimits.

/**
   * Test that the global limit on snapshots is honored.
   */
@Test(timeout = 10000)
public void testSnapshotLimits() throws Exception {
    // Set up mock objects for SnapshotManager.createSnapshot.
    INodeDirectory ids = mock(INodeDirectory.class);
    FSDirectory fsdir = mock(FSDirectory.class);
    INodesInPath iip = mock(INodesInPath.class);
    SnapshotManager sm = spy(new SnapshotManager(fsdir));
    doReturn(ids).when(sm).getSnapshottableRoot((INodesInPath) anyObject());
    doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();
    // Create testMaxSnapshotLimit snapshots; these should all succeed.
    for (Integer i = 0; i < testMaxSnapshotLimit; ++i) {
        sm.createSnapshot(iip, "dummy", i.toString());
    }
    // One more snapshot should now fail: the snapshot ID counter would
    // roll over past the configured maximum.
    try {
        sm.createSnapshot(iip, "dummy", "shouldFailSnapshot");
        Assert.fail("Expected SnapshotException not thrown");
    } catch (SnapshotException se) {
        Assert.assertTrue(StringUtils.toLowerCase(se.getMessage()).contains("rollover"));
    }
    // Delete a snapshot to free up a slot.
    sm.deleteSnapshot(iip, "", mock(INode.ReclaimContext.class));
    // Creating a snapshot should still fail: deleting a snapshot does not
    // reclaim its ID, so the counter remains at the rollover limit.
    try {
        sm.createSnapshot(iip, "dummy", "shouldFailSnapshot2");
        Assert.fail("Expected SnapshotException not thrown");
    } catch (SnapshotException se) {
        Assert.assertTrue(StringUtils.toLowerCase(se.getMessage()).contains("rollover"));
    }
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException) Test(org.junit.Test)
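
The two expect-failure blocks in this test are identical, so they can be factored into a helper. Below is a sketch; the helper name is ours, not part of the test class.

// Hypothetical helper collapsing the duplicated try/catch blocks above.
private static void assertRolloverFailure(SnapshotManager sm,
        INodesInPath iip, String snapshotName) throws IOException {
    try {
        sm.createSnapshot(iip, "dummy", snapshotName);
        Assert.fail("Expected SnapshotException not thrown");
    } catch (SnapshotException se) {
        // Only the rollover failure is expected; any other IOException
        // propagates and fails the test.
        Assert.assertTrue(
                StringUtils.toLowerCase(se.getMessage()).contains("rollover"));
    }
}

With this in place, the two blocks reduce to assertRolloverFailure(sm, iip, "shouldFailSnapshot") and assertRolloverFailure(sm, iip, "shouldFailSnapshot2").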

Example 34 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class TestSnapshotDeletion method testDeleteEarliestSnapshot2.

/**
   * Test deleting the earliest (first) snapshot. In this more complicated 
   * scenario, the snapshots are taken across directories.
   * <pre>
   * The test covers the following scenarios:
   * 1. delete the first diff in the diff list of a directory
   * 2. delete the first diff in the diff list of a file
   * </pre>
   * Also, the recursive cleanTree process should cover both INodeFile and 
   * INodeDirectory.
   */
@Test(timeout = 300000)
public void testDeleteEarliestSnapshot2() throws Exception {
    Path noChangeDir = new Path(sub, "noChangeDir");
    Path noChangeFile = new Path(noChangeDir, "noChangeFile");
    Path metaChangeFile = new Path(noChangeDir, "metaChangeFile");
    Path metaChangeDir = new Path(noChangeDir, "metaChangeDir");
    Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile toDeleteFileNode = TestSnapshotBlocksMap.assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = toDeleteFileNode.getBlocks();
    // create snapshot s0 on dir
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
    hdfs.delete(toDeleteFile, true);
    // the deletion adds diff of toDeleteFile and metaChangeDir
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
    // /TestSnapshot/sub/noChangeDir/metaChangeFile
    hdfs.setReplication(metaChangeFile, REPLICATION_1);
    hdfs.setOwner(metaChangeDir, "unknown", "unknown");
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // create snapshot s1 on dir
    hdfs.createSnapshot(dir, "s1");
    checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
    // delete snapshot s0
    hdfs.deleteSnapshot(dir, "s0");
    // namespace: remove toDeleteFile and its diff, metaChangeFile's diff, 
    // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and 
    // metaChangeFile's replication factor decreases
    checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // check 1: there is no snapshot s0
    final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
    Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
    assertNull(snapshot0);
    Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    DirectoryDiffList diffList = dirNode.getDiffs();
    assertEquals(1, diffList.asList().size());
    assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
    diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
    assertEquals(0, diffList.asList().size());
    // check 2: noChangeDir and noChangeFile are still there
    final INodeDirectory noChangeDirNode = (INodeDirectory) fsdir.getINode(noChangeDir.toString());
    assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
    final INodeFile noChangeFileNode = (INodeFile) fsdir.getINode(noChangeFile.toString());
    assertEquals(INodeFile.class, noChangeFileNode.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1, fsdir, blockmanager);
    // check 3: current metadata of metaChangeFile and metaChangeDir
    FileStatus status = hdfs.getFileStatus(metaChangeDir);
    assertEquals("unknown", status.getOwner());
    assertEquals("unknown", status.getGroup());
    status = hdfs.getFileStatus(metaChangeFile);
    assertEquals(REPLICATION_1, status.getReplication());
    TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(), 1, fsdir, blockmanager);
    // check 4: no snapshot copy for toDeleteFile
    try {
        hdfs.getFileStatus(toDeleteFile);
        fail("should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(), e);
    }
    final Path toDeleteFileInSnapshot = SnapshotTestHelper.getSnapshotPath(dir, "s0", toDeleteFile.toString().substring(dir.toString().length()));
    try {
        hdfs.getFileStatus(toDeleteFileInSnapshot);
        fail("should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(), e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) FileStatus(org.apache.hadoop.fs.FileStatus) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) DirectoryDiffList(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList) FileNotFoundException(java.io.FileNotFoundException) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)
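
SnapshotTestHelper.getSnapshotPath is a thin convenience over HDFS's reserved .snapshot directory. A sketch of the equivalent constructed by hand, assuming the same hdfs fixture; the concrete paths are illustrative.

// Snapshot contents are exposed under the snapshottable root's reserved
// ".snapshot" directory; these paths are read-only views of the old state.
Path snapshotRoot = new Path("/TestSnapshot/sub");
Path fileInS1 = new Path(snapshotRoot, ".snapshot/s1/noChangeDir/noChangeFile");
// Reads against a snapshot path succeed; writes are rejected by the NameNode.
FileStatus oldStatus = hdfs.getFileStatus(fileInS1);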

Example 35 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class TestSnapshotDeletion method checkQuotaUsageComputation.

private void checkQuotaUsageComputation(final Path dirPath, final long expectedNs, final long expectedDs) throws IOException {
    INodeDirectory dirNode = getDir(fsdir, dirPath);
    assertTrue(dirNode.isQuotaSet());
    QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs, q.getNameSpace());
    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs, q.getStorageSpace());
    QuotaCounts counts = dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), false);
    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs, counts.getNameSpace());
    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs, counts.getStorageSpace());
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts)
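
checkQuotaUsageComputation reads quota usage through NameNode-internal structures (DirectoryWithQuotaFeature and QuotaCounts). The client-side view of the same two numbers comes from ContentSummary; a sketch, again assuming the hdfs fixture and an illustrative path:

// Namespace consumption counts files plus directories; storage space counts
// bytes multiplied by the replication factor.
ContentSummary cs = hdfs.getContentSummary(new Path("/TestSnapshot"));
long namespaceConsumed = cs.getFileCount() + cs.getDirectoryCount();
long storageConsumed = cs.getSpaceConsumed();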

Aggregations

INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 46
Test (org.junit.Test): 29
Path (org.apache.hadoop.fs.Path): 26
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 22
INode (org.apache.hadoop.hdfs.server.namenode.INode): 21
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 14
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 12
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 11
WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount): 7
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 6
IOException (java.io.IOException): 5
FileStatus (org.apache.hadoop.fs.FileStatus): 5
SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException): 5
ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff): 5
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 3
SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus): 3
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2
SnapshotAndINode (org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode): 2
FileNotFoundException (java.io.FileNotFoundException): 1