Example 61 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class FSDirAppendOp, the method prepareFileForAppend:

/**
   * Convert current node to under construction.
   * Recreate in-memory lease record.
   *
   * @param fsn namespace
   * @param iip inodes in the path containing the file
   * @param leaseHolder identifier of the lease holder on this file
   * @param clientMachine identifier of the client machine
   * @param newBlock if the data is appended to a new block
   * @param writeToEditLog whether to persist this change to the edit log
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *                      rebuilding
   * @return the location of the last block if it is a partial block, or null otherwise
   * @throws IOException
   */
static LocatedBlock prepareFileForAppend(final FSNamesystem fsn,
        final INodesInPath iip, final String leaseHolder,
        final String clientMachine, final boolean newBlock,
        final boolean writeToEditLog, final boolean logRetryCache)
        throws IOException {
    assert fsn.hasWriteLock();
    final INodeFile file = iip.getLastINode().asFile();
    final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip);
    file.recordModification(iip.getLatestSnapshotId());
    file.toUnderConstruction(leaseHolder, clientMachine);
    fsn.getLeaseManager().addLease(file.getFileUnderConstructionFeature().getClientName(), file.getId());
    LocatedBlock ret = null;
    if (!newBlock) {
        FSDirectory fsd = fsn.getFSDirectory();
        ret = fsd.getBlockManager().convertLastBlockToUnderConstruction(file, 0);
        if (ret != null && delta != null) {
            Preconditions.checkState(delta.getStorageSpace() >= 0, "appending to" + " a block with size larger than the preferred block size");
            fsd.writeLock();
            try {
                fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
            } finally {
                fsd.writeUnlock();
            }
        }
    } else {
        BlockInfo lastBlock = file.getLastBlock();
        if (lastBlock != null) {
            ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
            ret = new LocatedBlock(blk, new DatanodeInfo[0]);
        }
    }
    if (writeToEditLog) {
        final String path = iip.getPath();
        if (NameNodeLayoutVersion.supports(Feature.APPEND_NEW_BLOCK, fsn.getEffectiveLayoutVersion())) {
            fsn.getEditLog().logAppendFile(path, file, newBlock, logRetryCache);
        } else {
            fsn.getEditLog().logOpenFile(path, file, false, logRetryCache);
        }
    }
    return ret;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)
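
To make the quota arithmetic behind the Preconditions.checkState above concrete: reopening a partial last block for append charges the quota as if the block will grow to the full preferred block size. Below is a minimal standalone sketch (hypothetical names, not Hadoop code) of that storage-space delta; a negative delta is exactly the "block with size larger than the preferred block size" condition the check rejects.

// Standalone sketch (hypothetical, not Hadoop code): the extra storage-space
// quota effectively reserved when the last partial block is reopened for append.
public class AppendQuotaSketch {

    /**
     * The block is charged as if it will fill up to the preferred block size,
     * at the file's replication factor.
     */
    static long ucBlockQuotaDelta(long preferredBlockSize,
                                  long lastBlockNumBytes,
                                  short replication) {
        return (preferredBlockSize - lastBlockNumBytes) * replication;
    }

    public static void main(String[] args) {
        // 128 MB preferred block size, last block holds 100 MB, replication 3:
        long delta = ucBlockQuotaDelta(128L << 20, 100L << 20, (short) 3);
        System.out.println("extra quota reserved: " + delta + " bytes");
        // A negative delta would mean the last block is already larger than
        // the preferred block size, which the checkState above rejects.
    }
}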

Example 62 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class FSDirAttrOp, the method unprotectedSetReplication:

static BlockInfo[] unprotectedSetReplication(FSDirectory fsd,
        INodesInPath iip, short replication)
        throws QuotaExceededException, UnresolvedLinkException,
        SnapshotAccessControlException, UnsupportedActionException {
    assert fsd.hasWriteLock();
    final BlockManager bm = fsd.getBlockManager();
    final INode inode = iip.getLastINode();
    if (inode == null || !inode.isFile() || inode.asFile().isStriped()) {
        // TODO: we do not support changing replication on striped files yet
        return null;
    }
    INodeFile file = inode.asFile();
    // Make sure the directory has sufficient quotas
    short oldBR = file.getPreferredBlockReplication();
    long size = file.computeFileSize(true, true);
    // If replication increases, check that the new usage stays within the quota
    if (oldBR < replication) {
        fsd.updateCount(iip, 0L, size, oldBR, replication, true);
    }
    file.setFileReplication(replication, iip.getLatestSnapshotId());
    short targetReplication = (short) Math.max(replication, file.getPreferredBlockReplication());
    if (oldBR > replication) {
        fsd.updateCount(iip, 0L, size, oldBR, targetReplication, true);
    }
    for (BlockInfo b : file.getBlocks()) {
        bm.setReplication(oldBR, targetReplication, b);
    }
    if (oldBR != -1) {
        if (oldBR > targetReplication) {
            FSDirectory.LOG.info("Decreasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
        } else {
            FSDirectory.LOG.info("Increasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
        }
    }
    return file.getBlocks();
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
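
The subtle point above is the distinction between the requested replication and the target replication: a snapshot may still pin the file at a higher factor, so the blocks are set to the maximum of the two. A minimal standalone sketch (hypothetical names, not Hadoop code):

// Standalone sketch (hypothetical, not Hadoop code): the effective
// replication applied to the blocks in unprotectedSetReplication.
public class SetReplicationSketch {

    /** Snapshots may pin a higher replication factor than the one requested. */
    static short targetReplication(short requested, short maxAcrossSnapshots) {
        return (short) Math.max(requested, maxAcrossSnapshots);
    }

    public static void main(String[] args) {
        // A snapshot still references the file at replication 3; user asks for 2.
        short target = targetReplication((short) 2, (short) 3);
        // The blocks stay at replication 3 until that snapshot is deleted.
        System.out.println("block replication remains: " + target);
    }
}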

Example 63 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class FSDirectory, the method updateReplicationFactor:

/**
   * Tell the block manager to update the replication factors when delete
   * happens. Deleting a file or a snapshot might decrease the replication
   * factor of the blocks as the blocks are always replicated to the highest
   * replication factor among all snapshots.
   */
void updateReplicationFactor(Collection<UpdatedReplicationInfo> blocks) {
    BlockManager bm = getBlockManager();
    for (UpdatedReplicationInfo e : blocks) {
        BlockInfo b = e.block();
        bm.setReplication(b.getReplication(), e.targetReplication(), b);
    }
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), UpdatedReplicationInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo.UpdatedReplicationInfo)
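
As a concrete illustration of the javadoc above, the sketch below (hypothetical names, not Hadoop code) shows why deleting a snapshot can lower a block's replication: the block is kept at the highest factor demanded by any snapshot that still references it.

// Standalone sketch (hypothetical, not Hadoop code): recomputing a block's
// replication after a delete removes one of the references pinning it high.
public class SnapshotReplicationSketch {

    /** New target: the maximum factor among the remaining references. */
    static short replicationAfterDelete(short[] remainingFactors) {
        short max = 0;
        for (short f : remainingFactors) {
            max = (short) Math.max(max, f);
        }
        return max;
    }

    public static void main(String[] args) {
        // Snapshots demanded factors {5, 3, 3}; the one demanding 5 is deleted.
        short[] remaining = {3, 3};
        System.out.println("new target replication: "
                + replicationAfterDelete(remaining)); // prints 3
    }
}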

Example 64 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class FSDirTruncateOp, the method unprotectedTruncate:

/**
   * Unprotected truncate implementation. Unlike
   * {@link FSDirTruncateOp#truncate}, this will not schedule block recovery.
   *
   * @param fsn namespace
   * @param iip inodes in the path containing the file
   * @param clientName client name
   * @param clientMachine client machine info
   * @param newLength the target file size
   * @param mtime modified time
   * @param truncateBlock truncate block
   * @throws IOException
   */
static void unprotectedTruncate(final FSNamesystem fsn,
        final INodesInPath iip, final String clientName,
        final String clientMachine, final long newLength, final long mtime,
        final Block truncateBlock)
        throws UnresolvedLinkException, QuotaExceededException,
        SnapshotAccessControlException, IOException {
    assert fsn.hasWriteLock();
    FSDirectory fsd = fsn.getFSDirectory();
    INodeFile file = iip.getLastINode().asFile();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    boolean onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, collectedBlocks, mtime, null);
    if (!onBlockBoundary) {
        BlockInfo oldBlock = file.getLastBlock();
        Block tBlk = prepareFileForTruncate(fsn, iip, clientName, clientMachine, file.computeFileSize() - newLength, truncateBlock);
        assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) && tBlk.getNumBytes() == truncateBlock.getNumBytes() : "Should be the same block.";
        if (oldBlock.getBlockId() != tBlk.getBlockId() && !file.isBlockInLatestSnapshot(oldBlock)) {
            oldBlock.delete();
            fsd.getBlockManager().removeBlockFromMap(oldBlock);
        }
    }
    assert onBlockBoundary == (truncateBlock == null) : "truncateBlock is null iff on block boundary: " + truncateBlock;
    fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
Also used: BlocksMapUpdateInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block)
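
The onBlockBoundary flag drives everything here: the last block needs to be truncated (and possibly replaced, under copy-on-truncate for snapshots) only when the new length falls strictly inside a block. The following standalone sketch (hypothetical names, not Hadoop code) states that condition, assuming a length counts as a boundary exactly when it equals the cumulative size of some prefix of the file's blocks.

// Standalone sketch (hypothetical, not Hadoop code): does a truncate target
// land on a block boundary?
public class TruncateBoundarySketch {

    static boolean onBlockBoundary(long[] blockLengths, long newLength) {
        if (newLength == 0) {
            return true; // truncating to empty keeps no partial block
        }
        long cumulative = 0;
        for (long len : blockLengths) {
            cumulative += len;
            if (cumulative == newLength) {
                return true;  // new length coincides with a block's end
            }
            if (cumulative > newLength) {
                return false; // new length falls inside this block
            }
        }
        return false; // beyond EOF; real truncate rejects this earlier
    }

    public static void main(String[] args) {
        long[] blocks = {128L << 20, 128L << 20, 64L << 20}; // 128MB, 128MB, 64MB
        System.out.println(onBlockBoundary(blocks, 256L << 20)); // true
        System.out.println(onBlockBoundary(blocks, 200L << 20)); // false: recovery needed
    }
}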

Example 65 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class FSDirTruncateOp, the method truncate:

/**
   * Truncate a file to a given size.
   *
   * @param fsn namespace
   * @param srcArg path name
   * @param newLength the target file size
   * @param clientName client name
   * @param clientMachine client machine info
   * @param mtime modified time
   * @param toRemoveBlocks to be removed blocks
   * @param pc permission checker to check fs permission
   * @return the truncate result
   * @throws IOException
   */
static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
        final long newLength, final String clientName,
        final String clientMachine, final long mtime,
        final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc)
        throws IOException, UnresolvedLinkException {
    assert fsn.hasWriteLock();
    FSDirectory fsd = fsn.getFSDirectory();
    final String src;
    final INodesInPath iip;
    final boolean onBlockBoundary;
    Block truncateBlock = null;
    fsd.writeLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
        src = iip.getPath();
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
        // truncating a file with striped blocks is not supported
        if (file.isStriped()) {
            throw new UnsupportedOperationException("Cannot truncate file with striped block " + src);
        }
        final BlockStoragePolicy lpPolicy = fsd.getBlockManager().getStoragePolicy("LAZY_PERSIST");
        if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
            throw new UnsupportedOperationException("Cannot truncate lazy persist file " + src);
        }
        // Check if the file is already being truncated with the same length
        final BlockInfo last = file.getLastBlock();
        if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
            final Block truncatedBlock = last.getUnderConstructionFeature().getTruncateBlock();
            if (truncatedBlock != null) {
                final long truncateLength = file.computeFileSize(false, false) + truncatedBlock.getNumBytes();
                if (newLength == truncateLength) {
                    return new TruncateResult(false, fsd.getAuditFileInfo(iip));
                }
            }
        }
        // Opening an existing file for truncate. May need lease recovery.
        fsn.recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE, iip, src, clientName, clientMachine, false);
        // Truncate length check.
        long oldLength = file.computeFileSize();
        if (oldLength == newLength) {
            return new TruncateResult(true, fsd.getAuditFileInfo(iip));
        }
        if (oldLength < newLength) {
            throw new HadoopIllegalArgumentException("Cannot truncate to a larger file size. Current size: " + oldLength + ", truncate size: " + newLength + ".");
        }
        // Perform INodeFile truncation.
        final QuotaCounts delta = new QuotaCounts.Builder().build();
        onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, toRemoveBlocks, mtime, delta);
        if (!onBlockBoundary) {
            // Open file for write, but don't log into edits
            long lastBlockDelta = file.computeFileSize() - newLength;
            assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
            truncateBlock = prepareFileForTruncate(fsn, iip, clientName, clientMachine, lastBlockDelta, null);
        }
        // update the quota: use the preferred block size for UC block
        fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
    } finally {
        fsd.writeUnlock();
    }
    fsn.getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime, truncateBlock);
    return new TruncateResult(onBlockBoundary, fsd.getAuditFileInfo(iip));
}
Also used: HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
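
The early-return path above handles a retried truncate: if the last block is already UNDER_RECOVERY with a pending truncate block, the length that recovery will eventually produce is the file size excluding the last block plus the truncate block's byte count, and a repeat request for that same length returns immediately. A minimal standalone sketch of that idempotence check (hypothetical names, not Hadoop code):

// Standalone sketch (hypothetical, not Hadoop code): detecting a duplicate
// truncate request while a previous truncate is still recovering.
public class TruncateIdempotenceSketch {

    static boolean isDuplicateTruncate(long fileSizeExcludingLastBlock,
                                       long pendingTruncateBlockBytes,
                                       long requestedNewLength) {
        long lengthAfterRecovery =
                fileSizeExcludingLastBlock + pendingTruncateBlockBytes;
        return requestedNewLength == lengthAfterRecovery;
    }

    public static void main(String[] args) {
        // File is 300 bytes without its last block; recovery will keep 50 bytes
        // of that block. A second truncate to 350 is a no-op.
        System.out.println(isDuplicateTruncate(300, 50, 350)); // true
        System.out.println(isDuplicateTruncate(300, 50, 320)); // false
    }
}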

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84
Test (org.junit.Test): 28
Path (org.apache.hadoop.fs.Path): 27
Block (org.apache.hadoop.hdfs.protocol.Block): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13
IOException (java.io.IOException): 11
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5