Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
Class DirectorySnapshottableFeature, method findRenameTargetPath.
/**
* We just found a deleted WithName node as the source of a rename operation.
* However, we should include it in our snapshot diff report as rename only
* if the rename target is also under the same snapshottable directory.
*/
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  while (inode != null) {
    if (inode == snapshotRoot) {
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null
          ? inode.getParentReference() : inode.getParent();
      if (parent != null && parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
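Stripped of the reference-node handling, the walk is simply: climb parent pointers from the rename target, prepend each name, and succeed only if the climb reaches the snapshot root. The sketch below isolates that idea with a hypothetical Node type; it is not HDFS code, just the shape of the algorithm.

import java.util.LinkedList;

class Node {
  final String name;
  final Node parent;
  Node(String name, Node parent) { this.name = name; this.parent = parent; }
}

class RenamePathSketch {
  /** Returns the path components from root down to node, or null if node
   *  is not under root -- the same contract as findRenameTargetPath. */
  static String[] pathUnderRoot(Node root, Node node) {
    LinkedList<String> ancestors = new LinkedList<>();
    for (Node cur = node; cur != null; cur = cur.parent) {
      if (cur == root) {
        return ancestors.toArray(new String[0]);
      }
      ancestors.addFirst(cur.name); // prepend, so the path reads top-down
    }
    return null; // walked off the tree without meeting root
  }

  public static void main(String[] args) {
    Node root = new Node("snapshotRoot", null);
    Node a = new Node("a", root);
    Node b = new Node("b", a);
    System.out.println(String.join("/", pathUnderRoot(root, b))); // a/b
    System.out.println(pathUnderRoot(a, root)); // null: root is above a
  }
}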
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
Class DirectoryWithSnapshotFeature, method cleanDeletedINode.
/**
* Clean an inode while we move it from the deleted list of post to the
* deleted list of prior.
* @param reclaimContext blocks and inodes that need to be reclaimed
* @param inode The inode to clean.
* @param post The id of the post snapshot.
* @param prior The id of the prior snapshot.
*/
private static void cleanDeletedINode(INode.ReclaimContext reclaimContext,
    INode inode, final int post, final int prior) {
  Deque<INode> queue = new ArrayDeque<>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        INodeReference.WithCount wc =
            (INodeReference.WithCount) wn.getReferredINode();
        if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
          // this wn is the last WithName inside the wc, and the DstReference
          // node has also been deleted. In this case, we should treat the
          // referred file/dir as a normal case
          queue.add(wc.getReferredINode());
        } else {
          wn.cleanSubtree(reclaimContext, post, prior);
        }
      }
      // For a DstReference node, since the node is not in the created list
      // of prior, we should treat it as a regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      file.getDiffs().deleteSnapshotDiff(reclaimContext, post, prior, file);
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          priorChildrenDiff.destroyCreatedList(reclaimContext, dir);
        }
      }
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null && priorChildrenDiff.search(
            ListType.DELETED, child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
}
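The traversal itself is a standard iterative breadth-first cleanup: push the root, pop nodes one at a time, act on each, and enqueue children instead of recursing, which protects the NameNode from stack overflows on deep directory trees. A minimal sketch of that pattern, with a hypothetical TreeNode standing in for INode:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

class TreeNode {
  List<TreeNode> children = List.of();
  void clean() { /* release resources held by this node */ }
}

class IterativeCleanup {
  /** Breadth-first cleanup without recursion, mirroring the ArrayDeque
   *  pattern used by cleanDeletedINode. */
  static void cleanSubtree(TreeNode root) {
    Deque<TreeNode> queue = new ArrayDeque<>();
    queue.addLast(root);
    while (!queue.isEmpty()) {
      TreeNode top = queue.pollFirst();
      top.clean();                 // handle the node itself first
      for (TreeNode child : top.children) {
        queue.addLast(child);      // defer children instead of recursing
      }
    }
  }
}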
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
Class DirectoryWithSnapshotFeature, method destroyDstSubtree.
/**
* Destroy a subtree under a DstReference node.
*/
public static void destroyDstSubtree(INode.ReclaimContext reclaimContext,
    INode inode, final int snapshot, final int prior) {
  Preconditions.checkArgument(prior != NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(reclaimContext, snapshot, prior);
    } else {
      // for DstReference node, continue this process to its subtree
      destroyDstSubtree(reclaimContext,
          inode.asReference().getReferredINode(), snapshot, prior);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(reclaimContext, snapshot, prior);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }
      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(reclaimContext, snapshot, prior, dir);
      }
      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(reclaimContext, dir);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(reclaimContext, child, snapshot, prior);
    }
  }
}
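The directory branch boils down to: recursively destroy children, but skip anything the prior snapshot still owns (the Map<INode, INode> above is effectively an identity set of excluded nodes). A self-contained sketch of that shape, with hypothetical FsNode and destroy names rather than HDFS types:

import java.util.List;
import java.util.Set;

class FsNode {
  List<FsNode> children = List.of();
  void destroy() { /* reclaim this node's storage */ }
}

class ExcludedDestroy {
  /** Recursively destroy a subtree, skipping children in the exclusion
   *  set -- the shape of destroyDstSubtree's directory branch, where nodes
   *  on the prior snapshot's deleted list must survive. */
  static void destroySubtree(FsNode node, Set<FsNode> excluded) {
    for (FsNode child : node.children) {
      if (excluded.contains(child)) {
        continue;  // still referenced by the prior snapshot; leave it alone
      }
      destroySubtree(child, excluded);
    }
    node.destroy(); // children handled; now reclaim the node itself
  }
}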
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
Class SnapshotManager, method getSnapshottableDirListing.
/**
* List all the snapshottable directories that are owned by the current user.
* @param userName Current user name.
* @return Snapshottable directories that are owned by the current user,
* represented as an array of {@link SnapshottableDirectoryStatus}. If
* {@code userName} is null, return all the snapshottable dirs.
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
    String userName) {
  if (snapshottables.isEmpty()) {
    return null;
  }
  List<SnapshottableDirectoryStatus> statusList =
      new ArrayList<SnapshottableDirectoryStatus>();
  for (INodeDirectory dir : snapshottables.values()) {
    if (userName == null || userName.equals(dir.getUserName())) {
      SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
          dir.getModificationTime(), dir.getAccessTime(),
          dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
          dir.getLocalNameBytes(), dir.getId(),
          dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
          dir.getDirectorySnapshottableFeature().getNumSnapshots(),
          dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
          dir.getParent() == null ? DFSUtilClient.EMPTY_BYTES :
              DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
      statusList.add(status);
    }
  }
  Collections.sort(statusList, SnapshottableDirectoryStatus.COMPARATOR);
  return statusList.toArray(
      new SnapshottableDirectoryStatus[statusList.size()]);
}
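Apart from the long status constructor, this is a plain filter/sort/convert pipeline over the snapshottables map. A self-contained sketch of the same shape, with a hypothetical DirStatus type standing in for SnapshottableDirectoryStatus:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

class DirStatus {
  final String owner;
  final String name;
  DirStatus(String owner, String name) { this.owner = owner; this.name = name; }
}

class ListingSketch {
  /** Filter entries by owner (null means "all"), sort, and return an
   *  array -- the same shape as getSnapshottableDirListing. */
  static DirStatus[] listing(Map<String, DirStatus> dirs, String userName) {
    if (dirs.isEmpty()) {
      return null; // the real method also signals "nothing" with null
    }
    List<DirStatus> result = new ArrayList<>();
    for (DirStatus d : dirs.values()) {
      if (userName == null || userName.equals(d.owner)) {
        result.add(d);
      }
    }
    result.sort(Comparator.comparing((DirStatus s) -> s.name));
    return result.toArray(new DirStatus[0]);
  }

  public static void main(String[] args) {
    Map<String, DirStatus> dirs = Map.of(
        "/a", new DirStatus("alice", "a"),
        "/b", new DirStatus("bob", "b"));
    System.out.println(listing(dirs, "alice").length); // 1
    System.out.println(listing(dirs, null).length);    // 2
  }
}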
Use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.
Class SnapshotManager, method setSnapshottable.
/**
* Set the given directory as a snapshottable directory.
* If the path is already a snapshottable directory, update the quota.
*/
public void setSnapshottable(final String path,
    boolean checkNestedSnapshottable) throws IOException {
  final INodesInPath iip = fsdir.getINodesInPath(path, DirOp.WRITE);
  final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
  if (checkNestedSnapshottable) {
    checkNestedSnapshottable(d, path);
  }
  if (d.isSnapshottable()) {
    // The directory is already a snapshottable directory.
    d.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
  } else {
    d.addSnapshottableFeature();
  }
  addSnapshottable(d);
}
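Note the idempotence: calling this on an already-snapshottable directory only resets its snapshot quota. Administrators typically reach this path via hdfs dfsadmin -allowSnapshot <path>. A minimal model of the enable-or-refresh pattern, with hypothetical Feature and Directory classes (not HDFS types):

class Feature {
  int snapshotQuota;
  Feature(int quota) { this.snapshotQuota = quota; }
}

class Directory {
  private Feature snapshottable; // null until the feature is enabled

  void setSnapshottable(int defaultQuota) {
    if (snapshottable != null) {
      // already snapshottable: only reset the quota to the default,
      // mirroring d.setSnapshotQuota(SNAPSHOT_LIMIT) above
      snapshottable.snapshotQuota = defaultQuota;
    } else {
      snapshottable = new Feature(defaultQuota); // addSnapshottableFeature()
    }
  }
}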