Example 1 with ReclaimContext

Use of org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext in project hadoop by apache.

From the class FSDirDeleteOp, method deleteForEditLog:

/**
   * Delete a path from the name space and update the count at each
   * ancestor directory that has a quota set.
   * <br>
   * Note: This is to be used by
   * {@link org.apache.hadoop.hdfs.server.namenode.FSEditLog} only.
   * <br>
   *
   * @param fsd the FSDirectory instance
   * @param iip the INodesInPath instance containing all the INodes for the path
   * @param mtime the time the inode is removed
   */
static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime) throws IOException {
    assert fsd.hasWriteLock();
    FSNamesystem fsn = fsd.getFSNamesystem();
    // Empty collectors that the ReclaimContext fills while the subtree is deleted.
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ChunkedArrayList<>();
    List<Long> removedUCFiles = new ChunkedArrayList<>();
    if (!deleteAllowed(iip)) {
        return;
    }
    List<INodeDirectory> snapshottableDirs = new ArrayList<>();
    FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
    // Collect phase: unlink the subtree and gather everything to be reclaimed.
    boolean filesRemoved = unprotectedDelete(fsd, iip,
        new ReclaimContext(fsd.getBlockStoragePolicySuite(),
            collectedBlocks, removedINodes, removedUCFiles),
        mtime);
    fsn.removeSnapshottableDirs(snapshottableDirs);
    if (filesRemoved) {
        // Apply phase: release leases, drop the inodes, and reclaim the blocks.
        fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
        fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
    }
}
Also used: BlocksMapUpdateInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo), ChunkedArrayList (org.apache.hadoop.util.ChunkedArrayList), ArrayList (java.util.ArrayList), ReclaimContext (org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext)
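
The assert at the top of deleteForEditLog means the caller must already hold the FSDirectory write lock. A minimal sketch of the locking a hypothetical call site needs, using the writeLock()/writeUnlock() pair that Example 2 below shows; fsd, iip, and mtime are assumed to be in scope already:

// Hedged sketch of a call site, not code from the Hadoop tree.
// deleteForEditLog asserts fsd.hasWriteLock(), so the caller takes the lock.
fsd.writeLock();
try {
    // iip and mtime are assumed: an already-resolved INodesInPath for the
    // target path and the timestamp carried by the edit-log entry.
    FSDirDeleteOp.deleteForEditLog(fsd, iip, mtime);
} finally {
    fsd.writeUnlock();
}

Compare this with delete in Example 2, which acquires and releases the same lock internally.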

Example 2 with ReclaimContext

Use of org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext in project hadoop by apache.

From the class FSDirDeleteOp, method delete:

/**
   * Delete the target directory and collect the blocks under it
   *
   * @param fsd the FSDirectory instance
   * @param iip the INodesInPath instance containing all the INodes for the path
   * @param collectedBlocks blocks under the deleted directory, to be removed
   *                        from the blocks map
   * @param removedINodes INodes that should be removed from inodeMap
   * @param removedUCFiles ids of under-construction files whose leases need
   *                       to be released
   * @param mtime the time the deletion takes effect
   * @return the number of namespace entries removed, or -1 if nothing was
   *         deleted
   */
static long delete(FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
        List<INode> removedINodes, List<Long> removedUCFiles, long mtime) throws IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
    }
    // -1 is the sentinel for "nothing was deleted".
    long filesRemoved = -1;
    FSNamesystem fsn = fsd.getFSNamesystem();
    fsd.writeLock();
    try {
        if (deleteAllowed(iip)) {
            List<INodeDirectory> snapshottableDirs = new ArrayList<>();
            FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
            ReclaimContext context = new ReclaimContext(
                fsd.getBlockStoragePolicySuite(), collectedBlocks,
                removedINodes, removedUCFiles);
            if (unprotectedDelete(fsd, iip, context, mtime)) {
                // The namespace delta counts how many namespace entries went away.
                filesRemoved = context.quotaDelta().getNsDelta();
            }
            fsd.updateReplicationFactor(context.collectedBlocks().toUpdateReplicationInfo());
            fsn.removeSnapshottableDirs(snapshottableDirs);
            fsd.updateCount(iip, context.quotaDelta(), false);
        }
    } finally {
        fsd.writeUnlock();
    }
    return filesRemoved;
}
Also used: ChunkedArrayList (org.apache.hadoop.util.ChunkedArrayList), ArrayList (java.util.ArrayList), ReclaimContext (org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext)
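
Unlike deleteForEditLog, this variant hands the collected blocks and inodes back to the caller through the collector arguments and only reports the namespace delta (or the -1 sentinel). A minimal caller sketch, assuming fsd, fsn, iip, and mtime are in scope as in the examples above; the cleanup calls mirror Example 1 and are illustrative, not the only valid sequence:

// Hedged caller sketch: prepare empty collectors for delete() to fill.
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<>();
List<Long> removedUCFiles = new ChunkedArrayList<>();
long filesRemoved = FSDirDeleteOp.delete(
    fsd, iip, collectedBlocks, removedINodes, removedUCFiles, mtime);
if (filesRemoved >= 0) {
    // Something was deleted: apply the collected state, as in Example 1.
    fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
    fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}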

Aggregations

ArrayList (java.util.ArrayList): 2 usages
ReclaimContext (org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext): 2 usages
ChunkedArrayList (org.apache.hadoop.util.ChunkedArrayList): 2 usages
BlocksMapUpdateInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo): 1 usage