Search in sources :

Example 6 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in the Apache Hadoop project.

Taken from the class TestSetQuotaWithSnapshot, method testSetQuota.

/**
 * Verifies that setting a quota on a directory does not attach the snapshot
 * feature to it: after {@code setQuota}, the inode must report
 * {@code isQuotaSet() == true} but still {@code isWithSnapshot() == false}.
 * NOTE(review): relies on test fixtures (hdfs, fsdir, BLOCKSIZE, REPLICATION,
 * seed) declared in the enclosing class, outside this snippet.
 */
@Test(timeout = 60000)
public void testSetQuota() throws Exception {
    final Path dir = new Path("/TestSnapshot");
    hdfs.mkdirs(dir);
    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    // create a subdirectory (with a file in it) AFTER the snapshot was taken
    Path sub = new Path(dir, "sub");
    hdfs.mkdirs(sub);
    Path fileInSub = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, fileInSub, BLOCKSIZE, REPLICATION, seed);
    INodeDirectory subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
    // subNode should be an INodeDirectory that does NOT carry the snapshot feature
    assertFalse(subNode.isWithSnapshot());
    hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    // re-resolve the inode after setQuota: the quota must now be set, and
    // setting the quota must not have added the snapshot feature
    subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
    assertTrue(subNode.isQuotaSet());
    assertFalse(subNode.isWithSnapshot());
}
Also used : Path(org.apache.hadoop.fs.Path) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) Test(org.junit.Test)

Example 7 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in the Apache Hadoop project.

Taken from the class TestRenameWithSnapshots, method testRenameAndUpdateSnapshottableDirs.

/**
   * Test rename where the src/dst directories are both snapshottable 
   * directories without snapshots. In such a case we need to update the 
   * snapshottable dir list in SnapshotManager.
   */
@Test(timeout = 60000)
public void testRenameAndUpdateSnapshottableDirs() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path foo = new Path(sdir1, "foo");
    final Path bar = new Path(sdir2, "bar");
    hdfs.mkdirs(foo);
    hdfs.mkdirs(bar);
    // make both directories snapshottable; bar additionally gets a snapshot
    hdfs.allowSnapshot(foo);
    SnapshotTestHelper.createSnapshot(hdfs, bar, snap1);
    assertEquals(2, fsn.getSnapshottableDirListing().length);
    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
    long fooId = fooNode.getId();
    try {
        // overwriting a snapshottable directory that still has snapshots
        // must be rejected
        hdfs.rename(foo, bar, Rename.OVERWRITE);
        fail("Expect exception since " + bar + " is snapshottable and already has snapshots");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains(bar.toString() + " is snapshottable and already has snapshots", e);
    }
    // once bar has no snapshots left, the overwrite rename succeeds
    hdfs.deleteSnapshot(bar, snap1);
    hdfs.rename(foo, bar, Rename.OVERWRITE);
    // the snapshottable dir list should now contain a single entry: the
    // renamed foo, living at path bar but keeping foo's original inode id
    SnapshottableDirectoryStatus[] dirs = fsn.getSnapshottableDirListing();
    assertEquals(1, dirs.length);
    assertEquals(bar, dirs[0].getFullPath());
    assertEquals(fooId, dirs[0].getDirStatus().getFileId());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) IOException(java.io.IOException) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test(org.junit.Test)

Example 8 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in the Apache Hadoop project.

Taken from the class TestRenameWithSnapshots, method testRenameDirAndDeleteSnapshot_2.

/**
   * After rename, delete the snapshot in src: verifies that snapshot diffs
   * are combined into the correct prior snapshot, that the rename target is
   * visible only through the snapshots that actually captured it, and that
   * the referred subtree is fully destroyed once all snapshots are gone.
   */
@Test
public void testRenameDirAndDeleteSnapshot_2() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir2, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s3");
    // move foo from sdir2 into sdir1 AFTER s1/s2/s3 were taken
    final Path newfoo = new Path(sdir1, "foo");
    hdfs.rename(foo, newfoo);
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    final Path bar2 = new Path(newfoo, "bar2");
    DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
    hdfs.createSnapshot(sdir1, "s4");
    // deleting newfoo must keep its contents reachable through snapshot s4
    hdfs.delete(newfoo, true);
    final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo/bar2");
    assertTrue(hdfs.exists(bar2_s4));
    final Path bar_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo/bar");
    assertTrue(hdfs.exists(bar_s4));
    // delete snapshot s4. The diff of s4 should be combined to s3
    hdfs.deleteSnapshot(sdir1, "s4");
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // foo/bar was moved under sdir1 only after s3 was taken, so it is not
    // visible in sdir1's s3 view but is visible in sdir2's s3 view
    Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
    assertFalse(hdfs.exists(bar_s3));
    bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar");
    assertTrue(hdfs.exists(bar_s3));
    // bar2 was created after every snapshot except s4, so neither s3 view has it
    Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar2");
    assertFalse(hdfs.exists(bar2_s3));
    bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar2");
    assertFalse(hdfs.exists(bar2_s3));
    // delete snapshot s3
    hdfs.deleteSnapshot(sdir2, "s3");
    final Path bar_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo/bar");
    assertTrue(hdfs.exists(bar_s2));
    // check internal details: foo in s2 should resolve to a WithName
    // reference whose referred directory carries exactly one diff, for s2
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
    INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
    assertTrue(fooRef instanceof INodeReference.WithName);
    INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
    assertEquals(1, fooWC.getReferenceCount());
    INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
    List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
    assertEquals(1, diffs.size());
    assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // delete snapshot s2.
    hdfs.deleteSnapshot(sdir2, "s2");
    assertFalse(hdfs.exists(bar_s2));
    restartClusterAndCheckImage(true);
    // make sure the whole referred subtree has been destroyed
    // (namespace count 3 presumably accounts for root, /dir1 and /dir2 —
    // NOTE(review): confirm against the cluster setup in the enclosing class)
    QuotaCounts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(3, q.getNameSpace());
    assertEquals(0, q.getStorageSpace());
    hdfs.deleteSnapshot(sdir1, "s1");
    restartClusterAndCheckImage(true);
    q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(3, q.getNameSpace());
    assertEquals(0, q.getStorageSpace());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) WithCount(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount) WithCount(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) Test(org.junit.Test)

Example 9 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in the Apache Hadoop project.

Taken from the class TestRenameWithSnapshots, method testRenameDirAndDeleteSnapshot_7.

/**
   * Unit test for HDFS-4842: when snapshot s1 is deleted, the snapshot copy
   * of a deleted file must be merged into the correct prior snapshot (s0 on
   * /test), not into s2 — a later snapshot taken on a nested snapshottable
   * directory (dir2).
   */
@Test
public void testRenameDirAndDeleteSnapshot_7() throws Exception {
    // nested snapshottable dirs are required: /test and /test/dir2 both
    // take snapshots in this scenario
    fsn.getSnapshotManager().setAllowNestedSnapshots(true);
    final Path test = new Path("/test");
    final Path dir1 = new Path(test, "dir1");
    final Path dir2 = new Path(test, "dir2");
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(dir2);
    final Path foo = new Path(dir2, "foo");
    final Path bar = new Path(foo, "bar");
    final Path file = new Path(bar, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
    // take a snapshot s0 and s1 on /test
    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
    SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
    // delete file so we have a snapshot copy for s1 in bar
    hdfs.delete(file, true);
    // create another snapshot on dir2
    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // rename foo from dir2 to dir1
    final Path newfoo = new Path(dir1, foo.getName());
    hdfs.rename(foo, newfoo);
    // delete snapshot s1
    hdfs.deleteSnapshot(test, "s1");
    // make sure the snapshot copy of file in s1 is merged to s0. For 
    // HDFS-4842, we need to make sure that we do not wrongly use s2 as the
    // prior snapshot of s1.
    final Path file_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2", "foo/bar/file");
    assertFalse(hdfs.exists(file_s2));
    final Path file_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0", "dir2/foo/bar/file");
    assertTrue(hdfs.exists(file_s0));
    // check dir1: foo should be in the created list of s0
    INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString()).asDirectory();
    List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
    assertEquals(1, dir1DiffList.size());
    List<INode> dList = dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
    assertTrue(dList.isEmpty());
    List<INode> cList = dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
    assertEquals(1, cList.size());
    // the created-list entry must be the same inode as the live foo
    INode cNode = cList.get(0);
    INode fooNode = fsdir.getINode4Write(newfoo.toString());
    assertSame(cNode, fooNode);
    // check foo and its subtree
    final Path newbar = new Path(newfoo, bar.getName());
    INodeDirectory barNode = fsdir.getINode4Write(newbar.toString()).asDirectory();
    assertSame(fooNode.asDirectory(), barNode.getParent());
    // bar should only have a snapshot diff for s0
    List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
    assertEquals(1, barDiffList.size());
    DirectoryDiff diff = barDiffList.get(0);
    INodeDirectory testNode = fsdir.getINode4Write(test.toString()).asDirectory();
    Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
    assertEquals(s0.getId(), diff.getSnapshotId());
    // and file should be stored in the deleted list of this snapshot diff
    assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());
    // check dir2: a WithName instance for foo should be in the deleted list
    // of the snapshot diff for s2
    INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
    List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
    // dir2Node should contain 1 snapshot diffs for s2
    assertEquals(1, dir2DiffList.size());
    dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
    assertEquals(1, dList.size());
    // the WithName reference in s2 and the live reference must share the
    // same referred inode
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2", foo.getName());
    INodeReference.WithName fooNode_s2 = (INodeReference.WithName) fsdir.getINode(foo_s2.toString());
    assertSame(dList.get(0), fooNode_s2);
    assertSame(fooNode.asReference().getReferredINode(), fooNode_s2.getReferredINode());
    restartClusterAndCheckImage(true);
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) INode(org.apache.hadoop.hdfs.server.namenode.INode) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) Test(org.junit.Test)

Example 10 with INodeDirectory

Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in the Apache Hadoop project.

Taken from the class TestRenameWithSnapshots, method testRenameFileAndDeleteSnapshot.

/**
   * Test renaming a file and then delete snapshots: each snapshot must keep
   * the replication factor the file had when the snapshot was taken, and
   * deleting snapshots must prune the file's diff list correctly.
   */
@Test
public void testRenameFileAndDeleteSnapshot() throws Exception {
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    final Path foo = new Path(sdir2, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    hdfs.createSnapshot(sdir1, "s3");
    // move foo from sdir2 to sdir1, then change its replication twice,
    // taking snapshot s4 in between so each setting is captured separately
    final Path newfoo = new Path(sdir1, "foo");
    hdfs.rename(foo, newfoo);
    hdfs.setReplication(newfoo, REPL_1);
    hdfs.createSnapshot(sdir1, "s4");
    hdfs.setReplication(newfoo, REPL_2);
    FileStatus status = hdfs.getFileStatus(newfoo);
    assertEquals(REPL_2, status.getReplication());
    // s4 must still see the replication value from before the second change
    final Path foo_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo");
    status = hdfs.getFileStatus(foo_s4);
    assertEquals(REPL_1, status.getReplication());
    hdfs.createSnapshot(sdir1, "s5");
    final Path foo_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5", "foo");
    status = hdfs.getFileStatus(foo_s5);
    assertEquals(REPL_2, status.getReplication());
    // delete snapshot s5.
    hdfs.deleteSnapshot(sdir1, "s5");
    restartClusterAndCheckImage(true);
    assertFalse(hdfs.exists(foo_s5));
    status = hdfs.getFileStatus(foo_s4);
    assertEquals(REPL_1, status.getReplication());
    // delete snapshot s4.
    hdfs.deleteSnapshot(sdir1, "s4");
    assertFalse(hdfs.exists(foo_s4));
    // foo was renamed into sdir1 after s3 was taken, so no s3 view has it
    Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo");
    assertFalse(hdfs.exists(foo_s3));
    // sdir2's s2 captured foo before the rename, with the original replication
    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
    assertTrue(hdfs.exists(foo_s2));
    status = hdfs.getFileStatus(foo_s2);
    assertEquals(REPL, status.getReplication());
    // internal check: after pruning, the file should retain exactly one
    // diff, whose last snapshot id matches s2
    INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
    assertEquals(1, snode.getDiffs().asList().size());
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
    assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
    // restart cluster
    restartClusterAndCheckImage(true);
    // delete snapshot s2.
    hdfs.deleteSnapshot(sdir2, "s2");
    assertFalse(hdfs.exists(foo_s2));
    // restart the cluster and check fsimage
    restartClusterAndCheckImage(true);
    // delete the remaining snapshots, checking the fsimage after each step
    hdfs.deleteSnapshot(sdir1, "s3");
    restartClusterAndCheckImage(true);
    hdfs.deleteSnapshot(sdir1, "s1");
    restartClusterAndCheckImage(true);
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) FileStatus(org.apache.hadoop.fs.FileStatus) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)

Aggregations

INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)46 Test (org.junit.Test)29 Path (org.apache.hadoop.fs.Path)26 INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath)22 INode (org.apache.hadoop.hdfs.server.namenode.INode)21 DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff)14 INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference)12 INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)11 WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount)7 QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)6 IOException (java.io.IOException)5 FileStatus (org.apache.hadoop.fs.FileStatus)5 SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException)5 ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff)5 FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory)4 NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException)3 SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)3 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)2 SnapshotAndINode (org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode)2 FileNotFoundException (java.io.FileNotFoundException)1