
Example 21 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class CacheReplicationMonitor method rescanCacheDirectives.

/**
   * Scan all CacheDirectives.  Use the information to figure out
   * what cache replication factor each block should have.
   */
private void rescanCacheDirectives() {
    FSDirectory fsDir = namesystem.getFSDirectory();
    final long now = new Date().getTime();
    for (CacheDirective directive : cacheManager.getCacheDirectives()) {
        scannedDirectives++;
        // Skip processing this entry if it has expired
        if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
            LOG.debug("Directive {}: the directive expired at {} (now = {})", directive.getId(), directive.getExpiryTime(), now);
            continue;
        }
        String path = directive.getPath();
        INode node;
        try {
            node = fsDir.getINode(path, DirOp.READ);
        } catch (IOException e) {
            // We don't cache through symlinks or invalid paths
            LOG.debug("Directive {}: Failed to resolve path {} ({})", directive.getId(), path, e.getMessage());
            continue;
        }
        if (node == null) {
            LOG.debug("Directive {}: No inode found at {}", directive.getId(), path);
        } else if (node.isDirectory()) {
            INodeDirectory dir = node.asDirectory();
            ReadOnlyList<INode> children = dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
            for (INode child : children) {
                if (child.isFile()) {
                    rescanFile(directive, child.asFile());
                }
            }
        } else if (node.isFile()) {
            rescanFile(directive, node.asFile());
        } else {
            LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ", directive.getId(), node);
        }
    }
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) ReadOnlyList(org.apache.hadoop.hdfs.util.ReadOnlyList) CacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirective) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) IOException(java.io.IOException) Date(java.util.Date)
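
The rescan above reads the wall clock once per pass and simply skips directives that have expired or whose paths fail to resolve, so one bad entry never aborts the whole scan. A minimal standalone sketch of that skip-and-continue pattern, using a hypothetical Directive record in place of the real CacheDirective class:

import java.util.List;

public class DirectiveScan {
    // Hypothetical stand-in for CacheDirective; expiryMillis <= 0 means "never expires".
    record Directive(long id, long expiryMillis, String path) {}

    static void rescan(List<Directive> directives) {
        // Read the clock once so every directive is judged against the same instant.
        final long now = System.currentTimeMillis();
        for (Directive d : directives) {
            if (d.expiryMillis() > 0 && d.expiryMillis() <= now) {
                continue; // expired: skip it, keep scanning the rest
            }
            System.out.println("would rescan " + d.path());
        }
    }
}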

Example 22 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class DirectorySnapshottableFeature method computeDiffRecursively.

/**
   * Recursively compute the difference between snapshots under a given
   * directory/file.
   * @param snapshotRoot The directory where snapshots were taken.
   * @param node The directory/file under which the diff is computed.
   * @param parentPath Relative path (corresponding to the snapshot root) of
   *                   the node's parent.
   * @param diffReport data structure used to store the diff.
   */
private void computeDiffRecursively(final INodeDirectory snapshotRoot, INode node, List<byte[]> parentPath, SnapshotDiffInfo diffReport) {
    final Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.getFrom() : diffReport.getTo();
    final Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.getTo() : diffReport.getFrom();
    byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
    if (node.isDirectory()) {
        final ChildrenDiff diff = new ChildrenDiff();
        INodeDirectory dir = node.asDirectory();
        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
        if (sf != null) {
            boolean change = sf.computeDiffBetweenSnapshots(earlierSnapshot, laterSnapshot, diff, dir);
            if (change) {
                diffReport.addDirDiff(dir, relativePath, diff);
            }
        }
        ReadOnlyList<INode> children = dir.getChildrenList(earlierSnapshot.getId());
        for (INode child : children) {
            final byte[] name = child.getLocalNameBytes();
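            // children on the diff's DELETED list are normally skipped; the
            // one exception, handled just below, is a rename source whose
            // target is still under the snapshot root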
            boolean toProcess = diff.searchIndex(ListType.DELETED, name) < 0;
            if (!toProcess && child instanceof INodeReference.WithName) {
                byte[][] renameTargetPath = findRenameTargetPath(snapshotRoot, (WithName) child, laterSnapshot == null ? Snapshot.CURRENT_STATE_ID : laterSnapshot.getId());
                if (renameTargetPath != null) {
                    toProcess = true;
                    diffReport.setRenameTarget(child.getId(), renameTargetPath);
                }
            }
            if (toProcess) {
                parentPath.add(name);
                computeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
                parentPath.remove(parentPath.size() - 1);
            }
        }
    } else if (node.isFile() && node.asFile().isWithSnapshot()) {
        INodeFile file = node.asFile();
        boolean change = file.getFileWithSnapshotFeature().changedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
        if (change) {
            diffReport.addFileDiff(file, relativePath);
        }
    }
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotAndINode(org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode) INode(org.apache.hadoop.hdfs.server.namenode.INode) WithName(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
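
Note how the traversal reuses one mutable parentPath list as a stack: a child's name is pushed before the recursive call and popped right after it, so a single buffer serves the entire walk, and an array is only materialized at the nodes that report a diff. A minimal sketch of that push/recurse/pop pattern, with a hypothetical Node record standing in for INode:

import java.util.ArrayList;
import java.util.List;

public class PathStackWalk {
    // Hypothetical tree node; the real code walks INodes.
    record Node(String name, List<Node> children) {}

    static void walk(Node node, List<String> path, List<String> out) {
        out.add(String.join("/", path));      // materialize only where needed
        for (Node child : node.children()) {
            path.add(child.name());           // push before descending
            walk(child, path, out);
            path.remove(path.size() - 1);     // pop on the way back up
        }
    }

    public static void main(String[] args) {
        Node root = new Node("", List.of(new Node("a", List.of(new Node("f", List.of())))));
        List<String> out = new ArrayList<>();
        walk(root, new ArrayList<>(List.of("")), out);
        System.out.println(out); // [, /a, /a/f]
    }
}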

Example 23 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class DirectorySnapshottableFeature method renameSnapshot.

/**
   * Rename a snapshot
   * @param path
   *          The directory path where the snapshot was taken. Used for
   *          generating exception message.
   * @param oldName
   *          Old name of the snapshot
   * @param newName
   *          New name the snapshot will be renamed to
   * @throws SnapshotException
   *           Thrown when either the snapshot with the old name does not
   *           exist or a snapshot with the new name already exists
   */
public void renameSnapshot(String path, String oldName, String newName) throws SnapshotException {
    if (newName.equals(oldName)) {
        return;
    }
    final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(oldName));
    if (indexOfOld < 0) {
        throw new SnapshotException("The snapshot " + oldName + " does not exist for directory " + path);
    } else {
        final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
        int indexOfNew = searchSnapshot(newNameBytes);
        if (indexOfNew >= 0) {
            throw new SnapshotException("The snapshot " + newName + " already exists for directory " + path);
        }
        // remove the one with old name from snapshotsByNames
        Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
        final INodeDirectory ssRoot = snapshot.getRoot();
        ssRoot.setLocalName(newNameBytes);
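        // a negative result from searchSnapshot encodes the insertion point
        // as -(insertionPoint) - 1; decode it to find where the renamed
        // snapshot belongs in the sorted list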
        indexOfNew = -indexOfNew - 1;
        if (indexOfNew <= indexOfOld) {
            snapshotsByNames.add(indexOfNew, snapshot);
        } else {
            // indexOfNew > indexOfOld
            snapshotsByNames.add(indexOfNew - 1, snapshot);
        }
    }
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)
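
The index arithmetic follows the java.util.Collections.binarySearch contract: a miss comes back as -(insertionPoint) - 1, and because the old entry is removed before the new one is inserted, every position after indexOfOld shifts down by one, which is why the insert index is decremented when indexOfNew > indexOfOld. A minimal sketch of the same rename-in-a-sorted-list move, detached from the snapshot classes:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortedRename {
    /** Rename oldName to newName while keeping the list sorted. */
    static void rename(List<String> sorted, String oldName, String newName) {
        int indexOfOld = Collections.binarySearch(sorted, oldName);
        if (indexOfOld < 0) {
            throw new IllegalArgumentException(oldName + " does not exist");
        }
        int indexOfNew = Collections.binarySearch(sorted, newName);
        if (indexOfNew >= 0) {
            throw new IllegalArgumentException(newName + " already exists");
        }
        sorted.remove(indexOfOld);
        indexOfNew = -indexOfNew - 1; // decode binarySearch's miss encoding
        // the removal shifted every index above indexOfOld down by one
        sorted.add(indexOfNew <= indexOfOld ? indexOfNew : indexOfNew - 1, newName);
    }

    public static void main(String[] args) {
        List<String> names = new ArrayList<>(List.of("alpha", "mid", "zulu"));
        rename(names, "mid", "beta");
        System.out.println(names); // [alpha, beta, zulu]
    }
}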

Example 24 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class DirectorySnapshottableFeature method findRenameTargetPath.

/**
   * We just found a deleted WithName node as the source of a rename operation.
   * However, we should include it in our snapshot diff report as a rename only
   * if the rename target is also under the same snapshottable directory.
   */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot, INodeReference.WithName wn, final int snapshotId) {
    INode inode = wn.getReferredINode();
    final LinkedList<byte[]> ancestors = Lists.newLinkedList();
    while (inode != null) {
        if (inode == snapshotRoot) {
            return ancestors.toArray(new byte[ancestors.size()][]);
        }
        if (inode instanceof INodeReference.WithCount) {
            inode = ((WithCount) inode).getParentRef(snapshotId);
        } else {
            INode parent = inode.getParentReference() != null ? inode.getParentReference() : inode.getParent();
            if (parent != null && parent instanceof INodeDirectory) {
                int sid = parent.asDirectory().searchChild(inode);
                if (sid < snapshotId) {
                    return null;
                }
            }
            if (!(parent instanceof WithCount)) {
                ancestors.addFirst(inode.getLocalNameBytes());
            }
            inode = parent;
        }
    }
    return null;
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotAndINode(org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode) INode(org.apache.hadoop.hdfs.server.namenode.INode) WithCount(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount)
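
The walk goes upward through parent links, prepending each name with addFirst so the result comes out in root-to-leaf order without a final reverse, and bails out with null as soon as the chain ends without meeting the snapshot root. A simplified sketch of that shape (the real code additionally hops through WithCount parent references and checks snapshot membership), with a hypothetical parent-linked Node:

import java.util.LinkedList;

public class AncestorPath {
    // Hypothetical parent-linked node; the real code walks INode parents.
    static final class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
    }

    /** Path of node relative to root, or null if node is not under root. */
    static String[] relativePath(Node root, Node node) {
        LinkedList<String> ancestors = new LinkedList<>();
        for (Node cur = node; cur != null; cur = cur.parent) {
            if (cur == root) {
                return ancestors.toArray(new String[0]);
            }
            ancestors.addFirst(cur.name); // prepend: builds root-to-leaf order
        }
        return null; // walked off the top without meeting root
    }

    public static void main(String[] args) {
        Node root = new Node("root", null);
        Node a = new Node("a", root);
        Node f = new Node("f", a);
        System.out.println(String.join("/", relativePath(root, f))); // a/f
    }
}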

Example 25 with INodeDirectory

use of org.apache.hadoop.hdfs.server.namenode.INodeDirectory in project hadoop by apache.

the class DirectoryWithSnapshotFeature method cleanDeletedINode.

/**
   * Clean an inode while we move it from the deleted list of post to the
   * deleted list of prior.
   * @param reclaimContext blocks and inodes that need to be reclaimed
   * @param inode The inode to clean.
   * @param post The post snapshot.
   * @param prior The id of the prior snapshot.
   */
private static void cleanDeletedINode(INode.ReclaimContext reclaimContext, INode inode, final int post, final int prior) {
    Deque<INode> queue = new ArrayDeque<>();
    queue.addLast(inode);
    while (!queue.isEmpty()) {
        INode topNode = queue.pollFirst();
        if (topNode instanceof INodeReference.WithName) {
            INodeReference.WithName wn = (INodeReference.WithName) topNode;
            if (wn.getLastSnapshotId() >= post) {
                INodeReference.WithCount wc = (INodeReference.WithCount) wn.getReferredINode();
                if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
                    // this wn is the last WithName node inside the wc, and the
                    // DstReference node has already been deleted, so treat the
                    // referred file/dir as the regular (non-reference) case
                    queue.add(wc.getReferredINode());
                } else {
                    wn.cleanSubtree(reclaimContext, post, prior);
                }
            }
        // A DstReference node is not in the created list of prior, so it is
        // treated as a regular file/dir
        } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
            INodeFile file = topNode.asFile();
            file.getDiffs().deleteSnapshotDiff(reclaimContext, post, prior, file);
        } else if (topNode.isDirectory()) {
            INodeDirectory dir = topNode.asDirectory();
            ChildrenDiff priorChildrenDiff = null;
            DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
            if (sf != null) {
                // delete files/dirs created after prior. Note that these
                // files/dirs, along with inode, were deleted right after post.
                DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
                if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                    priorChildrenDiff = priorDiff.getChildrenDiff();
                    priorChildrenDiff.destroyCreatedList(reclaimContext, dir);
                }
            }
            for (INode child : dir.getChildrenList(prior)) {
                if (priorChildrenDiff != null && priorChildrenDiff.search(ListType.DELETED, child.getLocalNameBytes()) != null) {
                    continue;
                }
                queue.addLast(child);
            }
        }
    }
}
Also used : INode(org.apache.hadoop.hdfs.server.namenode.INode) ArrayDeque(java.util.ArrayDeque) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory)
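
cleanDeletedINode replaces the natural recursion with an explicit ArrayDeque worklist, so arbitrarily deep directory trees cannot overflow the thread stack. A minimal sketch of that worklist shape, with hypothetical node types standing in for the INode hierarchy and print statements in place of the real reclaim logic:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class WorklistCleanup {
    // Hypothetical stand-ins for the INode hierarchy.
    sealed interface Node permits FileNode, DirNode {}
    record FileNode(String name) implements Node {}
    record DirNode(String name, List<Node> children) implements Node {}

    static void clean(Node start) {
        Deque<Node> queue = new ArrayDeque<>();
        queue.addLast(start);
        while (!queue.isEmpty()) {
            Node top = queue.pollFirst();
            if (top instanceof FileNode f) {
                System.out.println("cleaning file " + f.name());
            } else if (top instanceof DirNode d) {
                System.out.println("cleaning dir " + d.name());
                d.children().forEach(queue::addLast); // enqueue instead of recursing
            }
        }
    }
}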

Aggregations

INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 46 uses
Test (org.junit.Test): 29 uses
Path (org.apache.hadoop.fs.Path): 26 uses
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 22 uses
INode (org.apache.hadoop.hdfs.server.namenode.INode): 21 uses
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 14 uses
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 12 uses
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 11 uses
WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount): 7 uses
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 6 uses
IOException (java.io.IOException): 5 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 5 uses
SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException): 5 uses
ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff): 5 uses
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 4 uses
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 3 uses
SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus): 3 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2 uses
SnapshotAndINode (org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 1 use