Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
The class TestRenameWithSnapshots, method testRenameUndo_7.
/**
 * Test rename to an invalid name (xxx/.snapshot).
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  // create a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, snap1);

  // rename bar to /foo/.snapshot, which is invalid
  final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
  try {
    hdfs.rename(bar, invalid);
    fail("expect exception since an invalid name is used for rename");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("\""
        + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
  }

  // check
  INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
      .asDirectory();
  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString())
      .asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  // this diff is generated while renaming
  Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
  assertEquals(s1.getId(), diff.getSnapshotId());
  // after the undo, the diff should be empty
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());

  // bar was converted to a file with snapshot feature while renaming
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  assertSame(barNode, children.get(0));
  assertSame(fooNode, barNode.getParent());
  List<FileDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  FileDiff barDiff = barDiffList.get(0);
  assertEquals(s1.getId(), barDiff.getSnapshotId());

  // restart the cluster multiple times to make sure the fsimage and edit
  // log are correct. Note that when loading the fsimage, foo and bar will
  // be converted back to a normal INodeDirectory and INodeFile since they
  // do not store any snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPL).build();
  cluster.waitActive();
  restartClusterAndCheckImage(true);
}
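The reserved name this test exercises is the public constant HdfsConstants.DOT_SNAPSHOT_DIR (".snapshot"), which the NameNode rejects as a path component. As a minimal illustration, a hypothetical client-side guard (not an HDFS API; the helper name is invented here) could screen rename targets with the same constant:

// Hypothetical helper, not part of HDFS: reject a rename target whose
// final path component is the reserved ".snapshot" name before issuing
// the rename RPC. Uses only Path#getName and the public constant.
static boolean endsWithReservedName(Path target) {
  return HdfsConstants.DOT_SNAPSHOT_DIR.equals(target.getName());
}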
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
The class TestRenameWithSnapshots, method testRenameUndo_2.
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir after taking the snapshot.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  // create foo after taking the snapshot
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  // make the rename fail: stub dir2's addChild to return false, then swap
  // the spy into the tree
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
      Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList<INode> dir1Children = dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());

  // after the undo of rename, the created list of sdir1 should contain
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);

  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
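The failure injection is the part worth isolating: the test spies on the live dst directory, stubs addChild(...) to return false (as if the child could not be added), and splices the spy into the tree with replaceChild so the rename hits the stub and runs its undo path. A condensed sketch of the idiom with explanatory comments; the any(INode.class) matcher is an assumed substitute for the deprecated anyObject() on Mockito 2+:

// Failure-injection sketch: make the dst directory refuse new children,
// forcing rename(foo, newfoo) to fail and exercise the undo path.
INodeDirectory dst = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory spyDst = Mockito.spy(dst);
Mockito.doReturn(false).when(spyDst)
    .addChild(Mockito.any(INode.class), Mockito.anyBoolean(),
        Mockito.anyInt());
// Swap the spy into the namespace so subsequent operations go through it.
fsdir.getINode4Write("/").asDirectory()
    .replaceChild(dst, spyDst, fsdir.getINodeMap());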
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
The class TestRenameWithSnapshots, method testRenameUndo_6.
/**
 * Test the rename undo when removing the dst node fails.
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set the ns quota of dir2 to 4, so the current remaining is 1 (it
  // already has dir2, sub_dir2, and subsub_dir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
      .removeLastINode((INodesInPath) Mockito.anyObject());
  Whitebox.setInternalState(fsn, "dir", fsdir2);

  // rename /test/dir1/foo to /test/dir2/subdir/subdir with OVERWRITE; the
  // rename must fail while removing the existing dst node, since
  // removeLastINode is stubbed to throw
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("expected the fake exception thrown from removeLastINode");
  } catch (Exception e) {
    String msg = "fake exception";
    GenericTestUtils.assertExceptionContains(msg, e);
  }

  // check the undo
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(
      fsdir.getBlockStoragePolicySuite());
  assertEquals(3, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
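The namespace count asserted above (3 = dir2 itself plus the two nested subdirs) can also be read from the client side rather than via computeQuotaUsage on the INodeDirectory. A hedged sketch, assuming a release where DistributedFileSystem#getQuotaUsage is available (Hadoop 2.8+, import org.apache.hadoop.fs.QuotaUsage):

// Client-side view of the same accounting: ns quota 4 on dir2, three
// names consumed, one remaining.
QuotaUsage usage = hdfs.getQuotaUsage(dir2);
assertEquals(4, usage.getQuota());                  // namespace quota set above
assertEquals(3, usage.getFileAndDirectoryCount());  // dir2 + 2 subdirs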
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
The class TestSnapshot, method testAllowAndDisallowSnapshot.
/**
 * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
 * they are idempotent.
 */
@Test
public void testAllowAndDisallowSnapshot() throws Exception {
  final Path dir = new Path("/dir");
  final Path file0 = new Path(dir, "file0");
  final Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertFalse(dirNode.isSnapshottable());

  hdfs.allowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  // call allowSnapshot again
  hdfs.allowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());

  // disallowSnapshot on dir
  hdfs.disallowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertFalse(dirNode.isSnapshottable());
  // do it again
  hdfs.disallowSnapshot(dir);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertFalse(dirNode.isSnapshottable());

  // same process on root
  final Path root = new Path("/");
  INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
      .asDirectory();
  assertTrue(rootNode.isSnapshottable());
  // root is a snapshottable dir, but with 0 snapshot quota
  assertEquals(0,
      rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());

  hdfs.allowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
      rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
  // call allowSnapshot again
  hdfs.allowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
      rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());

  // disallowSnapshot on root
  hdfs.disallowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(0,
      rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
  // do it again
  hdfs.disallowSnapshot(root);
  rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
  assertTrue(rootNode.isSnapshottable());
  assertEquals(0,
      rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}
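The same snapshottable/not-snapshottable state can be observed from the client without reaching into fsdir. A minimal sketch using DistributedFileSystem#getSnapshottableDirListing (the null return for an empty listing is an assumption about the client contract; imports: java.io.IOException, org.apache.hadoop.hdfs.DistributedFileSystem, org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus):

// Returns true if `dir` appears in the NameNode's snapshottable listing.
static boolean isSnapshottable(DistributedFileSystem hdfs, Path dir)
    throws IOException {
  SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
  if (dirs == null) {
    return false;  // assumed: null when no directory is snapshottable
  }
  for (SnapshottableDirectoryStatus s : dirs) {
    if (s.getFullPath().equals(dir)) {
      return true;
    }
  }
  return false;
}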
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
The class TestRenameWithSnapshots, method testRenameUndo_5.
/**
 * Test a rename operation that would exceed the quota in the dst tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);
  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set the ns quota of dir2 to 4, so the current remaining is 2 (it
  // already has dir2 and subdir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  final Path foo2 = new Path(subdir2, foo.getName());
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
      .addLastINode((INodesInPath) Mockito.anyObject(),
          (INode) Mockito.anyObject(), (FsPermission) Mockito.anyObject(),
          Mockito.anyBoolean());
  Whitebox.setInternalState(fsn, "dir", fsdir2);

  // rename /test/dir1/foo to /test/dir2/subdir2/foo.
  // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
  // However, the rename operation will fail since we let addLastINode throw
  // NSQuotaExceededException
  boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);

  // check the undo
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode = fsdir2.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  List<DirectoryDiff> diffList = dir1Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(
      fsdir.getBlockStoragePolicySuite());
  assertEquals(2, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
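For contrast with the injected NSQuotaExceededException, here is a standalone sketch of tripping the namespace quota for real. It assumes an unmocked FSDirectory (the test above leaves addLastINode stubbed to throw, so it is not meant to run inside that test) and the same setup: an ns quota of 4 on dir2 with 2 names already consumed (dir2 and subdir2). The directory names x1..x3 are illustrative:

// Two more directories fit under the quota; the third must fail.
hdfs.mkdirs(new Path(dir2, "x1"));    // 3 of 4 names used
hdfs.mkdirs(new Path(dir2, "x2"));    // 4 of 4 names used
try {
  hdfs.mkdirs(new Path(dir2, "x3"));  // would be the 5th name
  fail("expected NSQuotaExceededException");
} catch (NSQuotaExceededException e) {
  // expected: dir2's namespace quota of 4 is exhausted
}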