Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
From the class TestRenameWithSnapshots, method testRenameUndo_4.
/**
 * Test undo where the dst node being overwritten is a reference node.
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  // rename foo2 to foo3; since sdir2 is captured by snapshot s2, foo3
  // becomes a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);
  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());
  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in the undo
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);
  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }
  // make sure the undo is correct
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
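A recurring move in these rename tests is resolving a path to its INode and then drilling into the reference layer. Below is a minimal sketch of that inspection, assuming the same fsdir handle (an FSDirectory) used by the test class; the helper name is ours, not Hadoop's.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;

// Hypothetical helper: returns the reference count behind a path, or -1 if
// the inode at the path is not a reference node.
static int referenceCountAt(FSDirectory fsdir, String path)
    throws IOException {
  INode node = fsdir.getINode4Write(path);
  if (!node.isReference()) {
    return -1;
  }
  // Each reference node points at a shared WithCount wrapper that tracks
  // how many references exist for the moved inode, as asserted above.
  INodeReference.WithCount wc =
      (INodeReference.WithCount) node.asReference().getReferredINode();
  return wc.getReferenceCount();
}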
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
From the class TestSetQuotaWithSnapshot, method testClearQuota.
/**
 * Test clearing the quota of a snapshottable dir or a dir with snapshots.
 */
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  hdfs.allowSnapshot(dir);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
      HdfsConstants.QUOTA_DONT_SET - 1);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  // create snapshot s1 (snapshots were already allowed on dir above)
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  // clear the quota of dir
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  // dir should still be a snapshottable directory
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(1, dirNode.getDiffs().asList().size());
  SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
  assertEquals(1, status.length);
  assertEquals(dir, status[0].getFullPath());
  final Path subDir = new Path(dir, "sub");
  hdfs.mkdirs(subDir);
  hdfs.createSnapshot(dir, "s2");
  final Path file = new Path(subDir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  INode subNode = fsdir.getINode4Write(subDir.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
  List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
  assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
  List<INode> createdList = diffList.get(0).getChildrenDiff()
      .getList(ListType.CREATED);
  assertEquals(1, createdList.size());
  assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
}
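A note on the two sentinels exercised above: HdfsConstants.QUOTA_DONT_SET tells setQuota to leave that quota unchanged, while HdfsConstants.QUOTA_RESET clears it. The following is a minimal sketch of the invariant the test keeps re-checking, assuming the same hdfs/fsdir fixture as the test class; the helper name is illustrative.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

// Hypothetical helper: clear the quota, then report whether the directory
// stayed snapshottable without gaining a new snapshot diff.
static boolean quotaClearPreservesSnapshotState(
    DistributedFileSystem hdfs, FSDirectory fsdir, Path dir)
    throws IOException {
  int diffsBefore = fsdir.getINode4Write(dir.toString()).asDirectory()
      .getDiffs().asList().size();
  // QUOTA_RESET removes the quota; QUOTA_DONT_SET would leave it as-is.
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  return dirNode.isSnapshottable()
      && dirNode.getDiffs().asList().size() == diffsBefore;
}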
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
From the class TestRenameWithSnapshots, method testRenameDirAndDeleteSnapshot_7.
/**
 * Unit test for HDFS-4842.
 */
@Test
public void testRenameDirAndDeleteSnapshot_7() throws Exception {
  fsn.getSnapshotManager().setAllowNestedSnapshots(true);
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
  // take snapshots s0 and s1 on /test
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
  // delete file so we have a snapshot copy for s1 in bar
  hdfs.delete(file, true);
  // create another snapshot on dir2
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  // rename foo from dir2 to dir1
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);
  // delete snapshot s1
  hdfs.deleteSnapshot(test, "s1");
  // make sure the snapshot copy of file in s1 is merged into s0. For
  // HDFS-4842 we need to make sure that we do not wrongly use s2 as the
  // prior snapshot of s1.
  final Path file_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
      "foo/bar/file");
  assertFalse(hdfs.exists(file_s2));
  final Path file_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "dir2/foo/bar/file");
  assertTrue(hdfs.exists(file_s0));
  // check dir1: foo should be in the created list of s0
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString()).asDirectory();
  List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
  assertEquals(1, dir1DiffList.size());
  List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
      .getList(ListType.DELETED);
  assertTrue(dList.isEmpty());
  List<INode> cList = dir1DiffList.get(0).getChildrenDiff()
      .getList(ListType.CREATED);
  assertEquals(1, cList.size());
  INode cNode = cList.get(0);
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertSame(cNode, fooNode);
  // check foo and its subtree
  final Path newbar = new Path(newfoo, bar.getName());
  INodeDirectory barNode = fsdir.getINode4Write(newbar.toString()).asDirectory();
  assertSame(fooNode.asDirectory(), barNode.getParent());
  // bar should only have a snapshot diff for s0
  List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  DirectoryDiff diff = barDiffList.get(0);
  INodeDirectory testNode = fsdir.getINode4Write(test.toString()).asDirectory();
  Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
  assertEquals(s0.getId(), diff.getSnapshotId());
  // and file should be stored in the deleted list of this snapshot diff
  assertEquals("file",
      diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());
  // check dir2: a WithName instance for foo should be in the deleted list
  // of the snapshot diff for s2
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
  List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
  // dir2Node should contain exactly one snapshot diff, for s2
  assertEquals(1, dir2DiffList.size());
  dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
  assertEquals(1, dList.size());
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
      foo.getName());
  INodeReference.WithName fooNode_s2 =
      (INodeReference.WithName) fsdir.getINode(foo_s2.toString());
  assertSame(dList.get(0), fooNode_s2);
  assertSame(fooNode.asReference().getReferredINode(),
      fooNode_s2.getReferredINode());
  restartClusterAndCheckImage(true);
}
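The diff bookkeeping checked above always has the same shape: each DirectoryDiff is tied to a snapshot id and carries a ChildrenDiff with CREATED and DELETED lists. A sketch of a generic walk over those lists follows; the method is a hypothetical debugging aid, and the import paths are assumed to match the branch these tests come from.

import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;

// Hypothetical helper: print every child recorded as created or deleted
// in each snapshot diff of a directory.
static void dumpChildrenDiffs(INodeDirectory dirNode) {
  for (DirectoryDiff diff : dirNode.getDiffs().asList()) {
    for (INode created : diff.getChildrenDiff().getList(ListType.CREATED)) {
      System.out.println(diff.getSnapshotId() + " created: "
          + created.getLocalName());
    }
    for (INode deleted : diff.getChildrenDiff().getList(ListType.DELETED)) {
      System.out.println(diff.getSnapshotId() + " deleted: "
          + deleted.getLocalName());
    }
  }
}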
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
From the class TestRenameWithSnapshots, method testRenameUndo_1.
/**
 * Test the undo of rename, where the renamed file/dir was created before
 * taking the snapshot.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
      Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);
  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString()).asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList<INode> dir1Children =
      dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
  // after the undo of rename, both the created and deleted lists of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
  List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertSame(fooNode, fooNode_s1);
  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList<INode> dir2Children =
      dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
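The failure injection used here, a Mockito spy on the destination INodeDirectory whose addChild always reports failure, is what forces the rename into its undo path. A condensed sketch of just that wiring, under the same fixture assumptions as the test (it assumes dirPath sits directly under the root, as /dir2 does above; the helper name is ours):

import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import org.mockito.Mockito;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

// Hypothetical fixture helper: swap dirPath's inode for a spy whose
// addChild always returns false, so the next rename into it must roll back.
static INodeDirectory injectAddChildFailure(FSDirectory fsdir, String dirPath)
    throws IOException {
  INodeDirectory dir = fsdir.getINode4Write(dirPath).asDirectory();
  INodeDirectory mockDir = spy(dir);
  doReturn(false).when(mockDir)
      .addChild((INode) anyObject(), anyBoolean(), Mockito.anyInt());
  // Re-parent the spy in place of the real inode under the root.
  fsdir.getINode4Write("/").asDirectory()
      .replaceChild(dir, mockDir, fsdir.getINodeMap());
  return mockDir;
}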
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
From the class TestNestedSnapshots, method testDisallowNestedSnapshottableDir.
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * it with an INodeDirectoryWithSnapshot.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
}
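The assertions above hinge on the difference between two directory states: isSnapshottable() means snapshots may be taken on the directory itself, while isWithSnapshot() means the directory participates in some ancestor's snapshot. A small sketch of a classifier built from only the calls the test uses; the method name and labels are illustrative.

import org.apache.hadoop.hdfs.server.namenode.INode;

// Hypothetical classifier based on the two flags the test toggles.
static String snapshotState(INode node) {
  if (!node.isDirectory()) {
    return "not a directory";
  }
  if (node.asDirectory().isSnapshottable()) {
    return "snapshottable";   // snapshots can be created on it
  }
  if (node.asDirectory().isWithSnapshot()) {
    return "with-snapshot";   // captured by an ancestor's snapshot
  }
  return "plain directory";
}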