Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
The class TestRenameWithSnapshots, method testRenameUndo_3:
/**
 * Test the undo section of the second-time rename.
 */
@Test
public void testRenameUndo_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
      Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  final Path foo_dir2 = new Path(sdir2, "foo2");
  final Path foo_dir3 = new Path(sdir3, "foo3");
  hdfs.rename(foo, foo_dir2);
  boolean result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);

  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(1, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  assertTrue(fooNode instanceof INodeReference.DstReference);
  List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());

  // create snapshot on sdir2 and rename again
  hdfs.createSnapshot(sdir2, "s3");
  result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);

  // check internal details again
  dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
  fooNode = fsdir.getINode4Write(foo_dir2.toString());
  dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(2, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
  childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  childrenDiff = dir2Diffs.get(1).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  assertTrue(hdfs.exists(foo_s3));
  assertTrue(fooNode instanceof INodeReference.DstReference);
  fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(2, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
}
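The test drives the rename-undo path by making the destination directory reject new children. A minimal sketch of that injection idiom in isolation (assuming the same JUnit/Mockito static imports and the fsdir/hdfs fixtures used throughout this test class; /src and /dst are hypothetical paths):

// Swap a Mockito spy into the inode tree so that every addChild() call on
// the target directory reports failure; rename() must then roll back.
INodeDirectory dst = fsdir.getINode4Write("/dst").asDirectory();
INodeDirectory mockDst = spy(dst);
doReturn(false).when(mockDst)
    .addChild((INode) anyObject(), anyBoolean(), Mockito.anyInt());
fsdir.getINode4Write("/").asDirectory()
    .replaceChild(dst, mockDst, fsdir.getINodeMap());

// A rename into /dst now fails and is undone; the source must survive.
assertFalse(hdfs.rename(new Path("/src/foo"), new Path("/dst/foo")));
assertTrue(hdfs.exists(new Path("/src/foo")));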
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
The class TestRenameWithSnapshots, method testRenameFromNonSDir2SDir:
/**
 * Test rename from a non-snapshottable dir to a snapshottable dir.
 */
@Test(timeout = 60000)
public void testRenameFromNonSDir2SDir() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, snap1);
  final Path newfoo = new Path(sdir2, "foo");
  hdfs.rename(foo, newfoo);
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
}
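Here the moved inode stays a plain INodeDirectory because no snapshot covers the source. For contrast, testRenameUndo_3 above shows the other case; a hedged sketch of that check, reusing the paths from this test but taking a snapshot on the source directory before the rename:

// Assumption: if the *source* dir carries a snapshot, rename wraps the
// moved inode in a DstReference so the snapshot keeps seeing the
// pre-rename state (as asserted in testRenameUndo_3).
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
hdfs.rename(foo, newfoo);
INode movedNode = fsdir.getINode4Write(newfoo.toString());
assertTrue(movedNode instanceof INodeReference.DstReference);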
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
The class DirectorySnapshottableFeature, method computeDiffRecursively:
/**
 * Recursively compute the difference between snapshots under a given
 * directory/file.
 * @param snapshotRoot The directory where snapshots were taken.
 * @param node The directory/file under which the diff is computed.
 * @param parentPath Relative path (corresponding to the snapshot root) of
 *                   the node's parent.
 * @param diffReport data structure used to store the diff.
 */
private void computeDiffRecursively(final INodeDirectory snapshotRoot,
    INode node, List<byte[]> parentPath, SnapshotDiffInfo diffReport) {
  final Snapshot earlierSnapshot = diffReport.isFromEarlier() ?
      diffReport.getFrom() : diffReport.getTo();
  final Snapshot laterSnapshot = diffReport.isFromEarlier() ?
      diffReport.getTo() : diffReport.getFrom();
  byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
  if (node.isDirectory()) {
    final ChildrenDiff diff = new ChildrenDiff();
    INodeDirectory dir = node.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      boolean change = sf.computeDiffBetweenSnapshots(earlierSnapshot,
          laterSnapshot, diff, dir);
      if (change) {
        diffReport.addDirDiff(dir, relativePath, diff);
      }
    }
    ReadOnlyList<INode> children = dir.getChildrenList(
        earlierSnapshot.getId());
    for (INode child : children) {
      final byte[] name = child.getLocalNameBytes();
      boolean toProcess = diff.searchIndex(ListType.DELETED, name) < 0;
      if (!toProcess && child instanceof INodeReference.WithName) {
        byte[][] renameTargetPath = findRenameTargetPath(
            snapshotRoot, (WithName) child,
            laterSnapshot == null ? Snapshot.CURRENT_STATE_ID :
                laterSnapshot.getId());
        if (renameTargetPath != null) {
          toProcess = true;
          diffReport.setRenameTarget(child.getId(), renameTargetPath);
        }
      }
      if (toProcess) {
        parentPath.add(name);
        computeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
        parentPath.remove(parentPath.size() - 1);
      }
    }
  } else if (node.isFile() && node.asFile().isWithSnapshot()) {
    INodeFile file = node.asFile();
    boolean change = file.getFileWithSnapshotFeature()
        .changedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
    if (change) {
      diffReport.addFileDiff(file, relativePath);
    }
  }
}
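This traversal is the engine behind the user-facing snapshot-diff API. A minimal client-side sketch (assumptions: a running cluster, a connected DistributedFileSystem handle dfs, and an existing snapshottable directory /snapdir with snapshots "s1" and "s2"):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;

SnapshotDiffReport report =
    dfs.getSnapshotDiffReport(new Path("/snapdir"), "s1", "s2");
for (DiffReportEntry entry : report.getDiffList()) {
  // Entries render as e.g. "M .", "+ ./newfile", "R ./old -> ./new".
  System.out.println(entry);
}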
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
The class DirectorySnapshottableFeature, method findRenameTargetPath:
/**
 * We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as rename only
 * if the rename target is also under the same snapshottable directory.
 */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  while (inode != null) {
    if (inode == snapshotRoot) {
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null
          ? inode.getParentReference() : inode.getParent();
      if (parent != null && parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
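The practical effect of this check: a rename shows up as a rename entry only when both ends stay under the same snapshottable root; otherwise the source degrades to a plain delete in the report. An illustrative sequence (a sketch, assuming the hdfs/fsdir fixtures from the tests above; /snaproot and its children are hypothetical):

Path root = new Path("/snaproot");
DFSTestUtil.createFile(hdfs, new Path(root, "a/foo"), BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(new Path(root, "b"));
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root, "before");
// Rename within the snapshottable root: findRenameTargetPath returns a
// non-null path, so the diff can report "R ./a/foo -> ./b/foo".
hdfs.rename(new Path(root, "a/foo"), new Path(root, "b/foo"));
hdfs.createSnapshot(root, "after");
SnapshotDiffReport report =
    hdfs.getSnapshotDiffReport(root, "before", "after");
// Had the target been outside /snaproot, findRenameTargetPath would return
// null and the same file would instead appear as a delete ("-") entry.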
Use of org.apache.hadoop.hdfs.server.namenode.INode in project hadoop by apache.
The class DirectoryWithSnapshotFeature, method cleanDirectory:
public void cleanDirectory(INode.ReclaimContext reclaimContext,
    final INodeDirectory currentINode, final int snapshot, int prior) {
  Map<INode, INode> priorCreated = null;
  Map<INode, INode> priorDeleted = null;
  QuotaCounts old = reclaimContext.quotaDelta().getCountsCopy();
  if (snapshot == Snapshot.CURRENT_STATE_ID) {
    // delete the current directory
    currentINode.recordModification(prior);
    // delete everything in created list
    DirectoryDiff lastDiff = diffs.getLast();
    if (lastDiff != null) {
      lastDiff.diff.destroyCreatedList(reclaimContext, currentINode);
    }
    currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior,
        null);
  } else {
    // update prior
    prior = getDiffs().updatePrior(snapshot, prior);
    // if there is a snapshot diff associated with prior, we need to record
    // its original created and deleted list before deleting post
    if (prior != NO_SNAPSHOT_ID) {
      DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        priorCreated = cloneDiffList(cList);
        priorDeleted = cloneDiffList(dList);
      }
    }
    getDiffs().deleteSnapshotDiff(reclaimContext, snapshot, prior,
        currentINode);
    currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior,
        priorDeleted);
    // check priorDiff again since it may be created during the diff deletion
    if (prior != NO_SNAPSHOT_ID) {
      DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        // clear the snapshot copies of nodes created between prior and
        // snapshot; nodes created before prior were already handled by the
        // cleanSubtreeRecursively call.
        if (priorCreated != null) {
          // we only check the node originally in prior's created list
          for (INode cNode : priorDiff.getChildrenDiff().getList(
              ListType.CREATED)) {
            if (priorCreated.containsKey(cNode)) {
              cNode.cleanSubtree(reclaimContext, snapshot, NO_SNAPSHOT_ID);
            }
          }
        }
        for (INode dNode : priorDiff.getChildrenDiff().getList(
            ListType.DELETED)) {
          if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
            cleanDeletedINode(reclaimContext, dNode, snapshot, prior);
          }
        }
      }
    }
  }
  QuotaCounts current = reclaimContext.quotaDelta().getCountsCopy();
  current.subtract(old);
  if (currentINode.isQuotaSet()) {
    reclaimContext.quotaDelta().addQuotaDirUpdate(currentINode, current);
  }
}
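Note how the method brackets all reclaim work with a before/after copy of the quota counts: only the net change produced by this call is charged to the directory, and only if a quota is actually set. The pattern in isolation (a sketch; doReclaimWork() is a hypothetical placeholder for the cleanup calls above):

QuotaCounts before = reclaimContext.quotaDelta().getCountsCopy();
doReclaimWork(reclaimContext);  // hypothetical: any subtree cleanup
QuotaCounts net = reclaimContext.quotaDelta().getCountsCopy();
net.subtract(before);           // net = counts released by this call only
if (currentINode.isQuotaSet()) {
  reclaimContext.quotaDelta().addQuotaDirUpdate(currentINode, net);
}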