Example 6 with BlocksMapUpdateInfo

Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

Class FSDirDeleteOp, method deleteInternal.

/**
   * Remove a file/directory from the namespace.
   * <p>
   * For large directories, deletion is incremental. The blocks under
   * the directory are collected and deleted a small number at a time while
   * holding the {@link org.apache.hadoop.hdfs.server.namenode.FSNamesystem} lock.
   * <p>
   * For a small directory or file, the deletion is done in one shot.
   * @param fsn namespace
   * @param iip the INodesInPath instance containing all the INodes for the path
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *          rebuilding
   * @return blocks collected from the deleted path
   * @throws IOException
   */
static BlocksMapUpdateInfo deleteInternal(FSNamesystem fsn, INodesInPath iip, boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
    }
    FSDirectory fsd = fsn.getFSDirectory();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ChunkedArrayList<>();
    List<Long> removedUCFiles = new ChunkedArrayList<>();
    long mtime = now();
    // Unlink the target directory from directory tree
    long filesRemoved = delete(fsd, iip, collectedBlocks, removedINodes, removedUCFiles, mtime);
    if (filesRemoved < 0) {
        return null;
    }
    fsd.getEditLog().logDelete(iip.getPath(), mtime, logRetryCache);
    incrDeletedFileCount(filesRemoved);
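    // Release leases held on the deleted under-construction files and detach the removed inodes.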
    fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* Namesystem.delete: " + iip.getPath() + " is removed");
    }
    return collectedBlocks;
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) ChunkedArrayList(org.apache.hadoop.util.ChunkedArrayList)
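
The Javadoc above is the important part: for a large directory, the caller does not free all blocks at once. It takes the returned BlocksMapUpdateInfo and removes the blocks a small batch at a time, reacquiring the namesystem lock per batch. Below is a minimal, self-contained sketch of that batching pattern; the ReentrantReadWriteLock, the batch size of 1000, and the println are simplified stand-ins for FSNamesystem internals, not Hadoop's actual code.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class IncrementalBlockRemoval {
    // Simplified stand-ins: HDFS uses the FSNamesystem lock and BlockManager here.
    private static final ReentrantReadWriteLock LOCK = new ReentrantReadWriteLock();
    private static final int BLOCK_DELETION_INCREMENT = 1000; // assumed batch size

    /** Delete collected blocks a small batch at a time, releasing the lock between batches. */
    static void removeBlocks(List<Long> toDeleteList) {
        int start = 0;
        while (start < toDeleteList.size()) {
            int end = Math.min(start + BLOCK_DELETION_INCREMENT, toDeleteList.size());
            LOCK.writeLock().lock();
            try {
                for (Long blockId : toDeleteList.subList(start, end)) {
                    System.out.println("removing block " + blockId); // stands in for blockManager.removeBlock(...)
                }
            } finally {
                // Releasing between batches lets other namespace operations run.
                LOCK.writeLock().unlock();
            }
            start = end;
        }
    }

    public static void main(String[] args) {
        List<Long> ids = new ArrayList<>();
        for (long i = 0; i < 2500; i++) {
            ids.add(i);
        }
        removeBlocks(ids); // three batches: 1000, 1000, 500
    }
}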

Example 7 with BlocksMapUpdateInfo

Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

Class FSDirRenameOp, method renameForEditLog.

/**
   * Rename src to dst.
   * <br>
   * Note: This is to be used by
   * {@link org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader} only.
   * <br>
   *
   * @param fsd       FSDirectory
   * @param src       source path
   * @param dst       destination path
   * @param timestamp modification time
   * @param options   Rename options
   */
static void renameForEditLog(FSDirectory fsd, String src, String dst, long timestamp, Options.Rename... options) throws IOException {
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    final INodesInPath srcIIP = fsd.getINodesInPath(src, DirOp.WRITE_LINK);
    final INodesInPath dstIIP = fsd.getINodesInPath(dst, DirOp.WRITE_LINK);
    unprotectedRenameTo(fsd, srcIIP, dstIIP, timestamp, collectedBlocks, options);
    if (!collectedBlocks.getToDeleteList().isEmpty()) {
        fsd.getFSNamesystem().getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
    }
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo)
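
renameForEditLog shows the division of labor around BlocksMapUpdateInfo: the operation itself only collects blocks that became unreferenced (here, those of an overwritten rename destination), and the caller removes them afterwards, skipping the cleanup when nothing was collected. The following is a simplified stand-in illustrating that contract; the class, method names, and block IDs are illustrative, not the Hadoop types.

import java.util.ArrayList;
import java.util.List;

public class CollectThenCleanup {
    /** Simplified stand-in for INode.BlocksMapUpdateInfo: it just accumulates block IDs. */
    static class BlocksToDelete {
        private final List<Long> toDeleteList = new ArrayList<>();
        void addDeleteBlock(long blockId) { toDeleteList.add(blockId); }
        List<Long> getToDeleteList() { return toDeleteList; }
    }

    /** Pretend rename: overwriting an existing destination frees its blocks into 'collected'. */
    static void rename(String src, String dst, boolean dstExists, BlocksToDelete collected) {
        if (dstExists) {
            collected.addDeleteBlock(42L); // blocks of the overwritten destination file
        }
        // ... relink src under dst in the namespace ...
    }

    public static void main(String[] args) {
        BlocksToDelete collected = new BlocksToDelete();
        rename("/a", "/b", true, collected);
        if (!collected.getToDeleteList().isEmpty()) { // same guard as in renameForEditLog
            System.out.println("freeing blocks " + collected.getToDeleteList());
        }
    }
}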

Example 8 with BlocksMapUpdateInfo

Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

Class FSDirTruncateOp, method unprotectedTruncate.

/**
   * Unprotected truncate implementation. Unlike
   * {@link FSDirTruncateOp#truncate}, this will not schedule block recovery.
   *
   * @param fsn namespace
   * @param iip path name
   * @param clientName client name
   * @param clientMachine client machine info
   * @param newLength the target file size
   * @param mtime modified time
   * @param truncateBlock truncate block
   * @throws IOException
   */
static void unprotectedTruncate(final FSNamesystem fsn, final INodesInPath iip, final String clientName, final String clientMachine, final long newLength, final long mtime, final Block truncateBlock) throws UnresolvedLinkException, QuotaExceededException, SnapshotAccessControlException, IOException {
    assert fsn.hasWriteLock();
    FSDirectory fsd = fsn.getFSDirectory();
    INodeFile file = iip.getLastINode().asFile();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    boolean onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, collectedBlocks, mtime, null);
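    // The new length falls inside a block: set up the last block for truncate
    // (copy-on-truncate when a snapshot requires it), and drop the old last
    // block if no snapshot still references it.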
    if (!onBlockBoundary) {
        BlockInfo oldBlock = file.getLastBlock();
        Block tBlk = prepareFileForTruncate(fsn, iip, clientName, clientMachine, file.computeFileSize() - newLength, truncateBlock);
        assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) && tBlk.getNumBytes() == truncateBlock.getNumBytes() : "Should be the same block.";
        if (oldBlock.getBlockId() != tBlk.getBlockId() && !file.isBlockInLatestSnapshot(oldBlock)) {
            oldBlock.delete();
            fsd.getBlockManager().removeBlockFromMap(oldBlock);
        }
    }
    assert onBlockBoundary == (truncateBlock == null) : "truncateBlock is null iff on block boundary: " + truncateBlock;
    fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block)
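
Whether the truncate lands on a block boundary is what selects the cheap path (drop whole blocks via the collected BlocksMapUpdateInfo) versus the last-block preparation above. The boundary check itself is simple arithmetic; here is a sketch under the assumption of a fixed preferred block size, which is an illustration rather than the Hadoop implementation:

public class TruncateBoundary {
    /** True if newLength cuts exactly between blocks, so no partial last block remains. */
    static boolean onBlockBoundary(long newLength, long blockSize) {
        return newLength % blockSize == 0;
    }

    public static void main(String[] args) {
        long blockSize = 128L * 1024 * 1024; // 128 MB, the common HDFS default
        System.out.println(onBlockBoundary(2 * blockSize, blockSize));    // true: just drop whole blocks
        System.out.println(onBlockBoundary(blockSize + 4096, blockSize)); // false: last block needs preparation
    }
}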

Example 9 with BlocksMapUpdateInfo

Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

Class FSDirRenameOp, method renameToInt.

/**
   * The new rename, which has POSIX semantics.
   */
static RenameResult renameToInt(FSDirectory fsd, final String srcArg, final String dstArg, boolean logRetryCache, Options.Rename... options) throws IOException {
    String src = srcArg;
    String dst = dstArg;
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -" + " " + src + " to " + dst);
    }
    final FSPermissionChecker pc = fsd.getPermissionChecker();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    // returns resolved path
    return renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo)
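
Outside the NameNode, this POSIX-style rename is reachable through FileContext#rename with Options.Rename flags; passing OVERWRITE replaces an existing destination file, whose blocks are exactly what ends up in collectedBlocks above. A usage sketch, assuming a configured Hadoop client on the classpath and purely illustrative paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class PosixRename {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // OVERWRITE: replace an existing destination instead of failing,
        // which is what makes the rename collect blocks to delete.
        fc.rename(new Path("/tmp/src"), new Path("/tmp/dst"), Options.Rename.OVERWRITE);
    }
}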

Example 10 with BlocksMapUpdateInfo

Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.

Class FSNamesystem, method deleteSnapshot.

/**
   * Delete a snapshot of a snapshottable directory.
   * @param snapshotRoot The snapshottable directory
   * @param snapshotName The name of the to-be-deleted snapshot
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *          rebuilding
   * @throws SafeModeException
   * @throws IOException
   */
void deleteSnapshot(String snapshotRoot, String snapshotName, boolean logRetryCache) throws IOException {
    final String operationName = "deleteSnapshot";
    boolean success = false;
    String rootPath = null;
    writeLock();
    BlocksMapUpdateInfo blocksToBeDeleted = null;
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
        rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
        blocksToBeDeleted = FSDirSnapshotOp.deleteSnapshot(dir, snapshotManager, snapshotRoot, snapshotName, logRetryCache);
        success = true;
    } catch (AccessControlException ace) {
        logAuditEvent(success, operationName, rootPath, null, null);
        throw ace;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    // removing the collected blocks has to happen outside of the global lock
    if (blocksToBeDeleted != null) {
        removeBlocks(blocksToBeDeleted);
    }
    logAuditEvent(success, operationName, rootPath, null, null);
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
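
deleteSnapshot shows the ordering convention used throughout FSNamesystem: mutate the namespace and collect blocks while holding the write lock, sync the edit log after releasing it, and only then remove the collected blocks, so the expensive cleanup never runs inside the global lock. Below is a self-contained sketch of that ordering, with a plain ReentrantReadWriteLock and a list of block IDs standing in for the Hadoop types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CleanupOutsideLock {
    private static final ReentrantReadWriteLock LOCK = new ReentrantReadWriteLock();

    public static void main(String[] args) {
        List<Long> blocksToBeDeleted = null;
        LOCK.writeLock().lock();
        try {
            // The namespace mutation happens here and only *collects* blocks.
            blocksToBeDeleted = new ArrayList<>(Arrays.asList(1L, 2L, 3L));
        } finally {
            LOCK.writeLock().unlock();
        }
        // The edit-log sync (getEditLog().logSync() above) also runs outside the lock.
        if (blocksToBeDeleted != null) {
            System.out.println("removing " + blocksToBeDeleted + " outside the global lock");
        }
    }
}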

Aggregations

BlocksMapUpdateInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) 11
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException) 4
AccessControlException (org.apache.hadoop.security.AccessControlException) 4
ChunkedArrayList (org.apache.hadoop.util.ChunkedArrayList) 3
IOException (java.io.IOException) 2
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus) 2
ArrayList (java.util.ArrayList) 1
List (java.util.List) 1
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException) 1
FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo) 1
InvalidPathException (org.apache.hadoop.fs.InvalidPathException) 1
Block (org.apache.hadoop.hdfs.protocol.Block) 1
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) 1
ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) 1
LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus) 1
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 1
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) 1
RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption) 1
EncryptionKeyInfo (org.apache.hadoop.hdfs.server.namenode.FSDirEncryptionZoneOp.EncryptionKeyInfo) 1
AddBlockOp (org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp) 1