Example 1 with INodesInPath

Use of org.apache.hadoop.hdfs.server.namenode.INodesInPath in project hadoop by apache.

From the class TestReservedRawPaths, method testINodesInPath.

/**
   * Verify resolving path will return an iip that tracks if the original
   * path was a raw path.
   */
@Test(timeout = 120000)
public void testINodesInPath() throws IOException {
    FSDirectory fsd = cluster.getNamesystem().getFSDirectory();
    final String path = "/path";
    INodesInPath iip = fsd.resolvePath(null, path, DirOp.READ);
    assertFalse(iip.isRaw());
    assertEquals(path, iip.getPath());
    iip = fsd.resolvePath(null, "/.reserved/raw" + path, DirOp.READ);
    assertTrue(iip.isRaw());
    assertEquals(path, iip.getPath());
}
Also used : INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) Test(org.junit.Test)
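
The test above never looks at the inode that gets resolved. The follow-up below is a minimal sketch, not taken from the Hadoop test suite, assuming the same MiniDFSCluster fixture (`cluster`) as TestReservedRawPaths; the final assertSame encodes the assumption that the /.reserved/raw prefix only flips the raw flag and does not change which inode is resolved.

@Test(timeout = 120000)
public void testRawResolutionHitsSameInode() throws IOException {
    FSDirectory fsd = cluster.getNamesystem().getFSDirectory();
    DistributedFileSystem fs = cluster.getFileSystem();
    final String dir = "/rawResolve";
    fs.mkdirs(new Path(dir));
    INodesInPath plain = fsd.resolvePath(null, dir, DirOp.READ);
    INodesInPath raw = fsd.resolvePath(null, "/.reserved/raw" + dir, DirOp.READ);
    // Both resolutions report the logical path without the reserved prefix.
    assertEquals(dir, plain.getPath());
    assertEquals(dir, raw.getPath());
    // Only the second resolution is flagged as raw.
    assertFalse(plain.isRaw());
    assertTrue(raw.isRaw());
    // Assumed: stripping the prefix resolves to the same underlying inode.
    assertSame(plain.getLastINode(), raw.getLastINode());
}
Also used (for this sketch): DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) Path (org.apache.hadoop.fs.Path)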

Example 2 with INodesInPath

Use of org.apache.hadoop.hdfs.server.namenode.INodesInPath in project hadoop by apache.

From the class SnapshotManager, method resetSnapshottable.

/**
   * Set the given snapshottable directory to non-snapshottable.
   * 
   * @throws SnapshotException if there are snapshots in the directory.
   */
public void resetSnapshottable(final String path) throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath(path, DirOp.WRITE);
    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
    DirectorySnapshottableFeature sf = d.getDirectorySnapshottableFeature();
    if (sf == null) {
        // the directory is already non-snapshottable
        return;
    }
    if (sf.getNumSnapshots() > 0) {
        throw new SnapshotException("The directory " + path + " has snapshot(s). " + "Please redo the operation after removing all the snapshots.");
    }
    if (d == fsdir.getRoot()) {
        d.setSnapshotQuota(0);
    } else {
        d.removeSnapshottableFeature();
    }
    removeSnapshottable(d);
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)
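
For context, here is a client-side sketch (not from the Hadoop sources, assuming a MiniDFSCluster fixture named `cluster`) of the behaviour resetSnapshottable enforces: disallowing snapshots is rejected while a snapshot exists and succeeds once it has been deleted. The server side throws SnapshotException; the catch block only checks the message, since the exact exception type seen by the client depends on RPC unwrapping.

@Test
public void testDisallowSnapshotRejectedWhileSnapshotExists() throws Exception {
    DistributedFileSystem hdfs = cluster.getFileSystem();
    final Path dir = new Path("/snapTarget");
    hdfs.mkdirs(dir);
    // SnapshotManager#setSnapshottable on the NameNode side.
    hdfs.allowSnapshot(dir);
    hdfs.createSnapshot(dir, "s0");
    try {
        // SnapshotManager#resetSnapshottable must refuse while s0 exists.
        hdfs.disallowSnapshot(dir);
        fail("disallowSnapshot should fail while snapshot s0 exists");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("has snapshot", e);
    }
    hdfs.deleteSnapshot(dir, "s0");
    // With no snapshots left the directory can be reset.
    hdfs.disallowSnapshot(dir);
}
Also used (for this sketch): DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) GenericTestUtils (org.apache.hadoop.test.GenericTestUtils) Path (org.apache.hadoop.fs.Path) Test (org.junit.Test)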

Example 3 with INodesInPath

Use of org.apache.hadoop.hdfs.server.namenode.INodesInPath in project hadoop by apache.

From the class SnapshotManager, method setSnapshottable.

/**
   * Set the given directory as a snapshottable directory.
   * If the path is already a snapshottable directory, update the quota.
   */
public void setSnapshottable(final String path, boolean checkNestedSnapshottable) throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath(path, DirOp.WRITE);
    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
    if (checkNestedSnapshottable) {
        checkNestedSnapshottable(d, path);
    }
    if (d.isSnapshottable()) {
        //The directory is already a snapshottable directory.
        d.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
    } else {
        d.addSnapshottableFeature();
    }
    addSnapshottable(d);
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath)
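
A short client-side sketch (again not from the Hadoop sources, and assuming an otherwise empty MiniDFSCluster fixture `cluster`) exercising both branches of setSnapshottable: the first allowSnapshot adds the snapshottable feature, the second only resets the snapshot quota, and the directory is still listed exactly once.

@Test
public void testAllowSnapshotIsIdempotent() throws Exception {
    DistributedFileSystem hdfs = cluster.getFileSystem();
    final Path dir = new Path("/snapTwice");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);
    // Already snapshottable: setSnapshottable only updates the quota.
    hdfs.allowSnapshot(dir);
    SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
    // Assumes no other snapshottable directories exist in the fixture.
    assertEquals(1, dirs.length);
    assertEquals(dir, dirs[0].getFullPath());
}
Also used (for this sketch): DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) Path (org.apache.hadoop.fs.Path) SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus) Test (org.junit.Test)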

Example 4 with INodesInPath

Use of org.apache.hadoop.hdfs.server.namenode.INodesInPath in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameUndo_6.

/**
   * Test the rename undo when removing dst node fails
   */
@Test
public void testRenameUndo_6() throws Exception {
    final Path test = new Path("/test");
    final Path dir1 = new Path(test, "dir1");
    final Path dir2 = new Path(test, "dir2");
    final Path sub_dir2 = new Path(dir2, "subdir");
    final Path subsub_dir2 = new Path(sub_dir2, "subdir");
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(subsub_dir2);
    final Path foo = new Path(dir1, "foo");
    hdfs.mkdirs(foo);
    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // set ns quota of dir2 to 4, so the current remaining is 1 (already has
    // dir2, sub_dir2, and subsub_dir2)
    hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
    FSDirectory fsdir2 = Mockito.spy(fsdir);
    Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2).removeLastINode((INodesInPath) Mockito.anyObject());
    Whitebox.setInternalState(fsn, "dir", fsdir2);
    // Rename /test/dir1/foo onto subsub_dir2 with OVERWRITE. Removing the
    // existing subsub_dir2 fails because removeLastINode is mocked to throw,
    // so the rename must be rolled back.
    try {
        hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
        fail("Expect QuotaExceedException");
    } catch (Exception e) {
        String msg = "fake exception";
        GenericTestUtils.assertExceptionContains(msg, e);
    }
    // check the undo
    assertTrue(hdfs.exists(foo));
    INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString()).asDirectory();
    List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode fooNode = childrenList.get(0);
    assertTrue(fooNode.asDirectory().isWithSnapshot());
    assertSame(dir1Node, fooNode.getParent());
    List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
    assertEquals(1, diffList.size());
    DirectoryDiff diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
    // check dir2
    INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
    assertTrue(dir2Node.isSnapshottable());
    QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
    assertEquals(3, counts.getNameSpace());
    assertEquals(0, counts.getStorageSpace());
    childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode subdir2Node = childrenList.get(0);
    assertSame(dir2Node, subdir2Node.getParent());
    assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
    INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
    assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
    assertSame(subdir2Node, subsubdir2Node.getParent());
    diffList = (dir2Node).getDiffs().asList();
    assertEquals(1, diffList.size());
    diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) NSQuotaExceededException(org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) IOException(java.io.IOException) Test(org.junit.Test)
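
The fault-injection pattern used above can be pulled out as a hypothetical helper (it is not part of the Hadoop code base): a Mockito spy of the live FSDirectory is told to fail removeLastINode, and Whitebox swaps it into the running FSNamesystem so that the next rename hits the undo path. `fsn` and `fsdir` are the test-class fields assumed by the surrounding tests.

private FSDirectory injectFailingRemoveLastINode(FSNamesystem fsn, FSDirectory fsdir) {
    FSDirectory spy = Mockito.spy(fsdir);
    // Any attempt to detach the rename destination now throws, which forces
    // the rename undo path that the test wants to cover.
    Mockito.doThrow(new RuntimeException("fake exception"))
        .when(spy).removeLastINode((INodesInPath) Mockito.anyObject());
    Whitebox.setInternalState(fsn, "dir", spy);
    return spy;
}
Also used (for this sketch): FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Mockito (org.mockito.Mockito) Whitebox (same import as in the tests above)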

Example 5 with INodesInPath

Use of org.apache.hadoop.hdfs.server.namenode.INodesInPath in project hadoop by apache.

From the class TestRenameWithSnapshots, method testRenameUndo_5.

/**
   * Test rename while the rename operation will exceed the quota in the dst
   * tree.
   */
@Test
public void testRenameUndo_5() throws Exception {
    final Path test = new Path("/test");
    final Path dir1 = new Path(test, "dir1");
    final Path dir2 = new Path(test, "dir2");
    final Path subdir2 = new Path(dir2, "subdir2");
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(subdir2);
    final Path foo = new Path(dir1, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // set ns quota of dir2 to 4, so the current remaining is 2 (already has
    // dir2, and subdir2)
    hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
    final Path foo2 = new Path(subdir2, foo.getName());
    FSDirectory fsdir2 = Mockito.spy(fsdir);
    Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2).addLastINode((INodesInPath) Mockito.anyObject(), (INode) Mockito.anyObject(), (FsPermission) Mockito.anyObject(), Mockito.anyBoolean());
    Whitebox.setInternalState(fsn, "dir", fsdir2);
    // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
    // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
    // However, the rename operation will fail since we let addLastINode throw
    // NSQuotaExceededException
    boolean rename = hdfs.rename(foo, foo2);
    assertFalse(rename);
    // check the undo
    assertTrue(hdfs.exists(foo));
    assertTrue(hdfs.exists(bar));
    INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString()).asDirectory();
    List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode fooNode = childrenList.get(0);
    assertTrue(fooNode.asDirectory().isWithSnapshot());
    INode barNode = fsdir2.getINode4Write(bar.toString());
    assertTrue(barNode.getClass() == INodeFile.class);
    assertSame(fooNode, barNode.getParent());
    List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
    assertEquals(1, diffList.size());
    DirectoryDiff diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
    // check dir2
    INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
    assertTrue(dir2Node.isSnapshottable());
    QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
    assertEquals(2, counts.getNameSpace());
    assertEquals(0, counts.getStorageSpace());
    childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
    assertEquals(1, childrenList.size());
    INode subdir2Node = childrenList.get(0);
    assertSame(dir2Node, subdir2Node.getParent());
    assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
    diffList = dir2Node.getDiffs().asList();
    assertEquals(1, diffList.size());
    diff = diffList.get(0);
    assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) NSQuotaExceededException(org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)
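
The two quota comments above ("remaining is 1", "remaining is 2") follow from simple namespace accounting: the directory itself and every descendant each consume one namespace unit against the NS quota. The hedged fragment below (same assumed fixture, meant to sit right after the setQuota call) just makes that arithmetic visible via getContentSummary. Only the configured quota is asserted; the derived numbers are illustrative and could be skewed by snapshot accounting.

// Namespace quota accounting for dir2 right after hdfs.setQuota(dir2, 4, ...):
ContentSummary cs = hdfs.getContentSummary(dir2);
assertEquals(4, cs.getQuota());                              // the NS quota that was just set
long namesUsed = cs.getDirectoryCount() + cs.getFileCount(); // dir2 itself plus descendants
long remaining = cs.getQuota() - namesUsed;                  // names a rename may still add
// In testRenameUndo_5 this is 4 - 2 = 2; in testRenameUndo_6 it is 4 - 3 = 1.
Also used (for this sketch): ContentSummary (org.apache.hadoop.fs.ContentSummary)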

Aggregations

INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 7
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 5
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 4
Test (org.junit.Test): 4
Path (org.apache.hadoop.fs.Path): 3
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 2
SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException): 2
INode (org.apache.hadoop.hdfs.server.namenode.INode): 2
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 2
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 2
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 2
IOException (java.io.IOException): 1
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 1