Search in sources :

Example 41 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

From the class DirectorySnapshottableFeature, the method findRenameTargetPath:

/**
   * We just found a deleted WithName node as the source of a rename operation.
   * However, we should include it in our snapshot diff report as rename only
   * if the rename target is also under the same snapshottable directory.
   *
   * @param snapshotRoot root of the snapshottable directory
   * @param wn the WithName node found in a deleted list
   * @param snapshotId id of the snapshot being diffed
   * @return the path components of the rename target relative to
   *         {@code snapshotRoot}, or {@code null} when the target is not
   *         under the same snapshottable directory
   */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot, INodeReference.WithName wn, final int snapshotId) {
    INode inode = wn.getReferredINode();
    final LinkedList<byte[]> ancestors = Lists.newLinkedList();
    // Walk upward from the referred inode collecting name components; if we
    // reach snapshotRoot, the rename target lies inside this snapshottable
    // directory and the collected components form its relative path.
    while (inode != null) {
        if (inode == snapshotRoot) {
            return ancestors.toArray(new byte[ancestors.size()][]);
        }
        if (inode instanceof INodeReference.WithCount) {
            // Cross the reference node: continue from the parent recorded for
            // this snapshot.
            inode = ((WithCount) inode).getParentRef(snapshotId);
        } else {
            INode parent = inode.getParentReference() != null ? inode.getParentReference() : inode.getParent();
            // instanceof already rejects null, so no separate null check is
            // needed here.
            if (parent instanceof INodeDirectory) {
                int sid = parent.asDirectory().searchChild(inode);
                if (sid < snapshotId) {
                    // NOTE(review): searchChild returned an id below
                    // snapshotId, which this method treats as "target not
                    // reachable under this snapshot".
                    return null;
                }
            }
            if (!(parent instanceof WithCount)) {
                // Do not record a name component for WithCount reference nodes.
                ancestors.addFirst(inode.getLocalNameBytes());
            }
            inode = parent;
        }
    }
    return null;
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotAndINode(org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode) INode(org.apache.hadoop.hdfs.server.namenode.INode) WithCount(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount)

Example 42 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

From the class DirectoryWithSnapshotFeature, the method cleanDeletedINode:

/**
   * Clean an inode while we move it from the deleted list of post to the
   * deleted list of prior.
   * @param reclaimContext blocks and inodes that need to be reclaimed
   * @param inode The inode to clean.
   * @param post The post snapshot.
   * @param prior The id of the prior snapshot.
   */
private static void cleanDeletedINode(INode.ReclaimContext reclaimContext, INode inode, final int post, final int prior) {
    // Iterative traversal of the subtree rooted at inode; avoids deep
    // recursion on large directory trees.
    Deque<INode> queue = new ArrayDeque<>();
    queue.addLast(inode);
    while (!queue.isEmpty()) {
        INode topNode = queue.pollFirst();
        if (topNode instanceof INodeReference.WithName) {
            INodeReference.WithName wn = (INodeReference.WithName) topNode;
            if (wn.getLastSnapshotId() >= post) {
                INodeReference.WithCount wc = (INodeReference.WithCount) wn.getReferredINode();
                if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
                    // this wn is the last wn inside of the wc, also the dstRef node has
                    // been deleted. In this case, we should treat the referred file/dir
                    // as normal case
                    queue.add(wc.getReferredINode());
                } else {
                    wn.cleanSubtree(reclaimContext, post, prior);
                }
            }
        // For DstReference node, since the node is not in the created list of
        // prior, we should treat it as regular file/dir
        } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
            // A file with snapshot data: drop its diff between prior and post.
            INodeFile file = topNode.asFile();
            file.getDiffs().deleteSnapshotDiff(reclaimContext, post, prior, file);
        } else if (topNode.isDirectory()) {
            INodeDirectory dir = topNode.asDirectory();
            ChildrenDiff priorChildrenDiff = null;
            DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
            if (sf != null) {
                // delete files/dirs created after prior. Note that these
                // files/dirs, along with inode, were deleted right after post.
                DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
                if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                    priorChildrenDiff = priorDiff.getChildrenDiff();
                    priorChildrenDiff.destroyCreatedList(reclaimContext, dir);
                }
            }
            for (INode child : dir.getChildrenList(prior)) {
                // Skip children that already appear in the deleted list of
                // prior; everything else is enqueued for cleaning.
                if (priorChildrenDiff != null && priorChildrenDiff.search(ListType.DELETED, child.getLocalNameBytes()) != null) {
                    continue;
                }
                queue.addLast(child);
            }
        }
    }
}
Also used : INode(org.apache.hadoop.hdfs.server.namenode.INode) ArrayDeque(java.util.ArrayDeque) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory)

Example 43 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

From the class DirectoryWithSnapshotFeature, the method destroyDstSubtree:

/**
   * Destroy a subtree under a DstReference node.
   * @param reclaimContext blocks and inodes that need to be reclaimed
   * @param inode root of the subtree to destroy
   * @param snapshot id of the snapshot being processed;
   *        {@code Snapshot.CURRENT_STATE_ID} indicates the current state
   * @param prior id of the prior snapshot; must not be {@code NO_SNAPSHOT_ID}
   */
public static void destroyDstSubtree(INode.ReclaimContext reclaimContext, INode inode, final int snapshot, final int prior) {
    Preconditions.checkArgument(prior != NO_SNAPSHOT_ID);
    if (inode.isReference()) {
        if (inode instanceof INodeReference.WithName && snapshot != Snapshot.CURRENT_STATE_ID) {
            // this inode has been renamed before the deletion of the DstReference
            // subtree
            inode.cleanSubtree(reclaimContext, snapshot, prior);
        } else {
            // for DstReference node, continue this process to its subtree
            destroyDstSubtree(reclaimContext, inode.asReference().getReferredINode(), snapshot, prior);
        }
    } else if (inode.isFile()) {
        inode.cleanSubtree(reclaimContext, snapshot, prior);
    } else if (inode.isDirectory()) {
        // Children recorded in the deleted list of prior are excluded from
        // the recursive walk at the bottom of this branch.
        Map<INode, INode> excludedNodes = null;
        INodeDirectory dir = inode.asDirectory();
        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
        if (sf != null) {
            DirectoryDiffList diffList = sf.getDiffs();
            DirectoryDiff priorDiff = diffList.getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
                excludedNodes = cloneDiffList(dList);
            }
            if (snapshot != Snapshot.CURRENT_STATE_ID) {
                diffList.deleteSnapshotDiff(reclaimContext, snapshot, prior, dir);
            }
            // Look up the prior diff again: deleteSnapshotDiff above may have
            // changed the diff list, so the earlier reference is not reused.
            priorDiff = diffList.getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                priorDiff.diff.destroyCreatedList(reclaimContext, dir);
            }
        }
        for (INode child : inode.asDirectory().getChildrenList(prior)) {
            if (excludedNodes != null && excludedNodes.containsKey(child)) {
                continue;
            }
            destroyDstSubtree(reclaimContext, child, snapshot, prior);
        }
    }
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference)

Example 44 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

From the class SnapshotManager, the method getSnapshottableDirListing:

/**
   * List all the snapshottable directories that are owned by the current user.
   * @param userName Current user name.
   * @return Snapshottable directories that are owned by the current user,
   *         represented as an array of {@link SnapshottableDirectoryStatus}. If
   *         {@code userName} is null, return all the snapshottable dirs.
   */
public SnapshottableDirectoryStatus[] getSnapshottableDirListing(String userName) {
    if (snapshottables.isEmpty()) {
        return null;
    }
    List<SnapshottableDirectoryStatus> statusList = new ArrayList<SnapshottableDirectoryStatus>();
    for (INodeDirectory dir : snapshottables.values()) {
        // A null userName matches every directory; otherwise only include
        // directories owned by that user.
        if (userName != null && !userName.equals(dir.getUserName())) {
            continue;
        }
        final INodeDirectory parent = dir.getParent();
        final byte[] parentPath = parent == null ? DFSUtilClient.EMPTY_BYTES : DFSUtil.string2Bytes(parent.getFullPathName());
        final DirectorySnapshottableFeature sf = dir.getDirectorySnapshottableFeature();
        statusList.add(new SnapshottableDirectoryStatus(dir.getModificationTime(), dir.getAccessTime(), dir.getFsPermission(), dir.getUserName(), dir.getGroupName(), dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(Snapshot.CURRENT_STATE_ID), sf.getNumSnapshots(), sf.getSnapshotQuota(), parentPath));
    }
    Collections.sort(statusList, SnapshottableDirectoryStatus.COMPARATOR);
    return statusList.toArray(new SnapshottableDirectoryStatus[statusList.size()]);
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) ArrayList(java.util.ArrayList) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)

Example 45 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

From the class SnapshotManager, the method setSnapshottable:

/**
   * Set the given directory as a snapshottable directory.
   * If the path is already a snapshottable directory, update the quota.
   */
public void setSnapshottable(final String path, boolean checkNestedSnapshottable) throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath(path, DirOp.WRITE);
    final INodeDirectory dir = INodeDirectory.valueOf(iip.getLastINode(), path);
    if (checkNestedSnapshottable) {
        checkNestedSnapshottable(dir, path);
    }
    if (dir.isSnapshottable()) {
        // Already a snapshottable directory: only reset the snapshot quota.
        dir.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
    } else {
        dir.addSnapshottableFeature();
    }
    addSnapshottable(dir);
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath)

Aggregations

INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)46 Test (org.junit.Test)29 Path (org.apache.hadoop.fs.Path)26 INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath)22 INode (org.apache.hadoop.hdfs.server.namenode.INode)21 DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff)14 INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference)12 INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)11 WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount)7 QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)6 IOException (java.io.IOException)5 FileStatus (org.apache.hadoop.fs.FileStatus)5 SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException)5 ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff)5 FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory)4 NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException)3 SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)3 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)2 SnapshotAndINode (org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode)2 FileNotFoundException (java.io.FileNotFoundException)1