Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.
From the class FSDirDeleteOp, the method deleteInternal:
/**
 * Remove a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental. The blocks under the
 * directory are collected and deleted a small number at a time while holding
 * the {@link org.apache.hadoop.hdfs.server.namenode.FSNamesystem} lock.
 * <p>
 * For a small directory or file, the deletion is done in one shot.
 *
 * @param fsn namespace
 * @param iip the INodesInPath instance containing all the INodes for the path
 * @param logRetryCache whether to record RPC ids in editlog for retry cache
 *                      rebuilding
 * @return blocks collected from the deleted path
 * @throws IOException
 */
static BlocksMapUpdateInfo deleteInternal(FSNamesystem fsn, INodesInPath iip,
    boolean logRetryCache) throws IOException {
  assert fsn.hasWriteLock();
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
  }
  FSDirectory fsd = fsn.getFSDirectory();
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  List<INode> removedINodes = new ChunkedArrayList<>();
  List<Long> removedUCFiles = new ChunkedArrayList<>();
  long mtime = now();
  // Unlink the target directory from directory tree
  long filesRemoved = delete(fsd, iip, collectedBlocks, removedINodes,
      removedUCFiles, mtime);
  if (filesRemoved < 0) {
    return null;
  }
  fsd.getEditLog().logDelete(iip.getPath(), mtime, logRetryCache);
  incrDeletedFileCount(filesRemoved);
  fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* Namesystem.delete: " + iip.getPath()
        + " is removed");
  }
  return collectedBlocks;
}
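deleteInternal only collects the blocks to be removed; the actual removal is driven by the caller so that the write lock can be dropped between batches, which is what the incremental deletion mentioned in the javadoc refers to. A minimal sketch of that pattern, assuming a hypothetical helper name and batch size (this is not the actual FSNamesystem code):

// Sketch only: drain the collected blocks in small batches, reacquiring the
// namesystem write lock for each batch so other operations can interleave.
static void removeBlocksIncrementally(FSNamesystem fsn,
    BlocksMapUpdateInfo collectedBlocks) {
  final int BLOCK_DELETION_INCREMENT = 1000;   // assumed batch size
  Iterator<BlockInfo> iter = collectedBlocks.getToDeleteList().iterator();
  while (iter.hasNext()) {
    fsn.writeLock();
    try {
      for (int i = 0; i < BLOCK_DELETION_INCREMENT && iter.hasNext(); i++) {
        fsn.getBlockManager().removeBlock(iter.next());
      }
    } finally {
      fsn.writeUnlock();
    }
  }
}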
Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.
From the class FSDirRenameOp, the method renameForEditLog:
/**
 * Rename src to dst.
 * <br>
 * Note: This is to be used by
 * {@link org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader} only.
 * <br>
 *
 * @param fsd FSDirectory
 * @param src source path
 * @param dst destination path
 * @param timestamp modification time
 * @param options Rename options
 * @throws IOException
 */
static void renameForEditLog(FSDirectory fsd, String src, String dst,
    long timestamp, Options.Rename... options) throws IOException {
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  final INodesInPath srcIIP = fsd.getINodesInPath(src, DirOp.WRITE_LINK);
  final INodesInPath dstIIP = fsd.getINodesInPath(dst, DirOp.WRITE_LINK);
  unprotectedRenameTo(fsd, srcIIP, dstIIP, timestamp, collectedBlocks, options);
  if (!collectedBlocks.getToDeleteList().isEmpty()) {
    fsd.getFSNamesystem().getBlockManager()
        .removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
  }
}
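collectedBlocks is only non-empty when the replayed rename overwrote an existing destination, whose blocks then have to leave the blocks map. A hedged sketch of a replay-time call (the paths and timestamp are placeholders, and the surrounding loader context is an assumption, not the real FSEditLogLoader code):

// Illustration only: roughly how edit-log replay might apply a recorded rename.
// An existing destination is overwritten, so its blocks are collected and
// removed inside renameForEditLog.
long opTimestamp = 1700000000000L;   // mtime recorded in the edit log entry
FSDirRenameOp.renameForEditLog(fsd, "/user/alice/part-0000.tmp",
    "/user/alice/part-0000", opTimestamp, Options.Rename.OVERWRITE);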
Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.
From the class FSDirTruncateOp, the method unprotectedTruncate:
/**
 * Unprotected truncate implementation. Unlike
 * {@link FSDirTruncateOp#truncate}, this will not schedule block recovery.
 *
 * @param fsn namespace
 * @param iip the INodesInPath for the file being truncated
 * @param clientName client name
 * @param clientMachine client machine info
 * @param newLength the target file size
 * @param mtime modification time
 * @param truncateBlock the truncate block (null when the new length falls
 *                      exactly on a block boundary)
 * @throws IOException
 */
static void unprotectedTruncate(final FSNamesystem fsn, final INodesInPath iip,
    final String clientName, final String clientMachine, final long newLength,
    final long mtime, final Block truncateBlock)
    throws UnresolvedLinkException, QuotaExceededException,
    SnapshotAccessControlException, IOException {
  assert fsn.hasWriteLock();
  FSDirectory fsd = fsn.getFSDirectory();
  INodeFile file = iip.getLastINode().asFile();
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  boolean onBlockBoundary =
      unprotectedTruncate(fsn, iip, newLength, collectedBlocks, mtime, null);
  if (!onBlockBoundary) {
    BlockInfo oldBlock = file.getLastBlock();
    Block tBlk = prepareFileForTruncate(fsn, iip, clientName, clientMachine,
        file.computeFileSize() - newLength, truncateBlock);
    assert Block.matchingIdAndGenStamp(tBlk, truncateBlock)
        && tBlk.getNumBytes() == truncateBlock.getNumBytes()
        : "Should be the same block.";
    if (oldBlock.getBlockId() != tBlk.getBlockId()
        && !file.isBlockInLatestSnapshot(oldBlock)) {
      // a new block replaced the old last block; the old one is no longer
      // referenced (and not held by a snapshot), so drop it from the blocks map
      oldBlock.delete();
      fsd.getBlockManager().removeBlockFromMap(oldBlock);
    }
  }
  assert onBlockBoundary == (truncateBlock == null)
      : "truncateBlock is null iff on block boundary: " + truncateBlock;
  fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
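The boundary check drives the whole method: when the new length coincides with the end of a complete block there is no partial last block to deal with, so truncateBlock is null (per the final assert) and only collectedBlocks needs processing. A simplified illustration, assuming fixed-size full blocks of 128 MB; real block layouts can vary, so this is a sketch of the idea rather than the actual HDFS check:

// Illustration only, assuming every existing block is a full 128 MB block.
long blockSize  = 128L * 1024 * 1024;
long fileSize   = 3 * blockSize;                      // three full blocks
long newLength1 = 2 * blockSize;                      // ends exactly at block 2
long newLength2 = 2 * blockSize + 5;                  // ends 5 bytes into block 3
boolean onBoundary1 = (newLength1 % blockSize == 0);  // true  -> truncateBlock == null
boolean onBoundary2 = (newLength2 % blockSize == 0);  // false -> last block must be shortened
long delta = fileSize - newLength2;                   // bytes cut off the end; corresponds to the
                                                      // file.computeFileSize() - newLength value
                                                      // passed to prepareFileForTruncate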
Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.
From the class FSDirRenameOp, the method renameToInt:
/**
 * The new rename, which has POSIX semantics.
 */
static RenameResult renameToInt(FSDirectory fsd, final String srcArg,
    final String dstArg, boolean logRetryCache, Options.Rename... options)
    throws IOException {
  String src = srcArg;
  String dst = dstArg;
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -"
        + " " + src + " to " + dst);
  }
  final FSPermissionChecker pc = fsd.getPermissionChecker();
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  // returns resolved path
  return renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
}
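A BlocksMapUpdateInfo is threaded through here because a POSIX-style rename with Options.Rename.OVERWRITE can replace an existing destination, whose blocks must then be cleaned up. A hedged caller-side sketch; the locking, paths, and edit-log sync are assumptions about the surrounding FSNamesystem code, not a copy of it:

// Sketch only: a namesystem-level caller driving the POSIX-style rename.
static void renameExample(FSNamesystem fsn, FSDirectory fsd) throws IOException {
  fsn.writeLock();
  try {
    FSDirRenameOp.renameToInt(fsd, "/data/staging/run-42",
        "/data/published/run-42", true /* logRetryCache */,
        Options.Rename.OVERWRITE);
  } finally {
    fsn.writeUnlock();
  }
  fsn.getEditLog().logSync();   // persist the rename before acknowledging it
}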
Use of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo in project hadoop by apache.
From the class FSNamesystem, the method deleteSnapshot:
/**
 * Delete a snapshot of a snapshottable directory.
 *
 * @param snapshotRoot the snapshottable directory
 * @param snapshotName the name of the to-be-deleted snapshot
 * @param logRetryCache whether to record RPC ids in editlog for retry cache
 *                      rebuilding
 * @throws SafeModeException
 * @throws IOException
 */
void deleteSnapshot(String snapshotRoot, String snapshotName,
    boolean logRetryCache) throws IOException {
  final String operationName = "deleteSnapshot";
  boolean success = false;
  String rootPath = null;
  writeLock();
  BlocksMapUpdateInfo blocksToBeDeleted = null;
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
    rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
    blocksToBeDeleted = FSDirSnapshotOp.deleteSnapshot(dir, snapshotManager,
        snapshotRoot, snapshotName, logRetryCache);
    success = true;
  } catch (AccessControlException ace) {
    logAuditEvent(success, operationName, rootPath, null, null);
    throw ace;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  // Blocks collected while holding the write lock are removed only after the
  // global (namesystem) lock has been released
  if (blocksToBeDeleted != null) {
    removeBlocks(blocksToBeDeleted);
  }
  logAuditEvent(success, operationName, rootPath, null, null);
}
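For context, this method sits behind the public snapshot API: blocks that were only kept alive by the snapshot become deletable once it is gone. A client reaches it roughly like this (hedged sketch; the cluster URI and paths are placeholders, and the directory must already have been made snapshottable by an administrator):

// Client-side view (sketch): deleting a snapshot eventually drives
// FSNamesystem.deleteSnapshot on the NameNode.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
Path dir = new Path("/user/alice/data");
fs.createSnapshot(dir, "s1");       // requires 'hdfs dfsadmin -allowSnapshot' on the dir
// ... files under /user/alice/data are later deleted or overwritten ...
fs.deleteSnapshot(dir, "s1");       // blocks referenced only by s1 can now be removed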