Example 96 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class PendingReconstructionBlocks method metaSave.

/**
   * Iterate through all items and print them.
   */
void metaSave(PrintWriter out) {
    synchronized (pendingReconstructions) {
        out.println("Metasave: Blocks being reconstructed: " + pendingReconstructions.size());
        for (Map.Entry<BlockInfo, PendingBlockInfo> entry : pendingReconstructions.entrySet()) {
            PendingBlockInfo pendingBlock = entry.getValue();
            Block block = entry.getKey();
            out.println(block + " StartTime: " + new Time(pendingBlock.timeStamp) + " NumReconstructInProgress: " + pendingBlock.getNumReplicas());
        }
    }
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) Time(java.sql.Time) HashMap(java.util.HashMap) Map(java.util.Map)
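
The pattern above is worth noting: every read of the shared pendingReconstructions map happens under the map's own monitor, and the output is one summary line followed by one line per entry. Below is a minimal self-contained sketch of the same pattern using plain JDK types; BlockKey, Pending, and the sample values are illustrative stand-ins, not Hadoop classes.

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;

public class MetaSavePattern {

    // Illustrative stand-ins for BlockInfo and PendingBlockInfo.
    record BlockKey(long id) {}
    record Pending(long timeStamp, int numReplicas) {}

    private final Map<BlockKey, Pending> pending = new HashMap<>();

    void metaSave(PrintWriter out) {
        // Synchronize on the map itself, as the Hadoop code does, so the
        // size() call and the iteration see a consistent snapshot.
        synchronized (pending) {
            out.println("Metasave: Blocks being reconstructed: " + pending.size());
            for (Map.Entry<BlockKey, Pending> e : pending.entrySet()) {
                out.println(e.getKey()
                        + " StartTime: " + new java.sql.Time(e.getValue().timeStamp())
                        + " NumReconstructInProgress: " + e.getValue().numReplicas());
            }
        }
    }

    public static void main(String[] args) {
        MetaSavePattern p = new MetaSavePattern();
        p.pending.put(new BlockKey(1073741825L), new Pending(System.currentTimeMillis(), 2));
        StringWriter sw = new StringWriter();
        p.metaSave(new PrintWriter(sw, true));
        System.out.print(sw);
    }
}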

Example 97 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class FSDirTruncateOp method unprotectedTruncate.

/**
   * Unprotected truncate implementation. Unlike
   * {@link FSDirTruncateOp#truncate}, this will not schedule block recovery.
   *
   * @param fsn namespace
   * @param iip path name
   * @param clientName client name
   * @param clientMachine client machine info
   * @param newLength the target file size
   * @param mtime modified time
   * @param truncateBlock truncate block
   * @throws IOException
   */
static void unprotectedTruncate(final FSNamesystem fsn, final INodesInPath iip,
        final String clientName, final String clientMachine, final long newLength,
        final long mtime, final Block truncateBlock)
        throws UnresolvedLinkException, QuotaExceededException,
        SnapshotAccessControlException, IOException {
    assert fsn.hasWriteLock();
    FSDirectory fsd = fsn.getFSDirectory();
    INodeFile file = iip.getLastINode().asFile();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    boolean onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, collectedBlocks, mtime, null);
    if (!onBlockBoundary) {
        BlockInfo oldBlock = file.getLastBlock();
        Block tBlk = prepareFileForTruncate(fsn, iip, clientName, clientMachine,
                file.computeFileSize() - newLength, truncateBlock);
        assert Block.matchingIdAndGenStamp(tBlk, truncateBlock)
                && tBlk.getNumBytes() == truncateBlock.getNumBytes()
                : "Should be the same block.";
        if (oldBlock.getBlockId() != tBlk.getBlockId() && !file.isBlockInLatestSnapshot(oldBlock)) {
            oldBlock.delete();
            fsd.getBlockManager().removeBlockFromMap(oldBlock);
        }
    }
    assert onBlockBoundary == (truncateBlock == null)
            : "truncateBlock is null iff on block boundary: " + truncateBlock;
    fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block)
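
The assert in the middle of unprotectedTruncate relies on the fact that Block.matchingIdAndGenStamp compares only the block id and generation stamp, which is why the byte count gets its own clause. A small standalone illustration of that distinction; the id, length, and generation stamp values are made up:

import org.apache.hadoop.hdfs.protocol.Block;

public class MatchingIdDemo {
    public static void main(String[] args) {
        Block a = new Block(1073741825L, 512L, 1001L); // id, numBytes, genStamp
        Block b = new Block(1073741825L, 256L, 1001L); // same id/genStamp, shorter
        System.out.println(Block.matchingIdAndGenStamp(a, b));   // true
        System.out.println(a.getNumBytes() == b.getNumBytes());  // false
    }
}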

Example 98 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class FSDirTruncateOp method truncate.

/**
   * Truncate a file to a given size.
   *
   * @param fsn namespace
   * @param srcArg path name
   * @param newLength the target file size
   * @param clientName client name
   * @param clientMachine client machine info
   * @param mtime modified time
   * @param toRemoveBlocks to be removed blocks
   * @param pc permission checker to check fs permission
   * @return truncate result
   * @throws IOException
   */
static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
        final long newLength, final String clientName, final String clientMachine,
        final long mtime, final BlocksMapUpdateInfo toRemoveBlocks,
        final FSPermissionChecker pc) throws IOException, UnresolvedLinkException {
    assert fsn.hasWriteLock();
    FSDirectory fsd = fsn.getFSDirectory();
    final String src;
    final INodesInPath iip;
    final boolean onBlockBoundary;
    Block truncateBlock = null;
    fsd.writeLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
        src = iip.getPath();
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
        // Truncating a file with striped blocks is not supported.
        if (file.isStriped()) {
            throw new UnsupportedOperationException("Cannot truncate file with striped block " + src);
        }
        final BlockStoragePolicy lpPolicy = fsd.getBlockManager().getStoragePolicy("LAZY_PERSIST");
        if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
            throw new UnsupportedOperationException("Cannot truncate lazy persist file " + src);
        }
        // Check if the file is already being truncated with the same length
        final BlockInfo last = file.getLastBlock();
        if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
            final Block truncatedBlock = last.getUnderConstructionFeature().getTruncateBlock();
            if (truncatedBlock != null) {
                final long truncateLength = file.computeFileSize(false, false) + truncatedBlock.getNumBytes();
                if (newLength == truncateLength) {
                    return new TruncateResult(false, fsd.getAuditFileInfo(iip));
                }
            }
        }
        // Opening an existing file for truncate. May need lease recovery.
        fsn.recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE, iip, src, clientName, clientMachine, false);
        // Truncate length check.
        long oldLength = file.computeFileSize();
        if (oldLength == newLength) {
            return new TruncateResult(true, fsd.getAuditFileInfo(iip));
        }
        if (oldLength < newLength) {
            throw new HadoopIllegalArgumentException(
                    "Cannot truncate to a larger file size. Current size: " + oldLength
                            + ", truncate size: " + newLength + ".");
        }
        // Perform INodeFile truncation.
        final QuotaCounts delta = new QuotaCounts.Builder().build();
        onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, toRemoveBlocks, mtime, delta);
        if (!onBlockBoundary) {
            // Open file for write, but don't log into edits
            long lastBlockDelta = file.computeFileSize() - newLength;
            assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
            truncateBlock = prepareFileForTruncate(fsn, iip, clientName, clientMachine, lastBlockDelta, null);
        }
        // update the quota: use the preferred block size for UC block
        fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
    } finally {
        fsd.writeUnlock();
    }
    fsn.getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime, truncateBlock);
    return new TruncateResult(onBlockBoundary, fsd.getAuditFileInfo(iip));
}
Also used : HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
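
For context, this NameNode-side code is reached from the public client API: FileSystem#truncate (available since Hadoop 2.7) issues the RPC that ends up in FSDirTruncateOp.truncate. A minimal client-side sketch; the cluster URI and file path are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateClient {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
            // Returns true if the truncate finished immediately (newLength on a
            // block boundary, per the TruncateResult above); false means
            // last-block recovery was scheduled and clients should wait for it
            // to complete before further updates to the file.
            boolean done = fs.truncate(new Path("/tmp/data.bin"), 1024L);
            System.out.println("truncate completed synchronously: " + done);
        }
    }
}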

Example 99 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class FSDirWriteFileOp method analyzeFileState.

private static FileState analyzeFileState(FSNamesystem fsn, INodesInPath iip,
        long fileId, String clientName, ExtendedBlock previous,
        LocatedBlock[] onRetryBlock) throws IOException {
    assert fsn.hasReadLock();
    String src = iip.getPath();
    checkBlock(fsn, previous);
    onRetryBlock[0] = null;
    fsn.checkNameNodeSafeMode("Cannot add block to " + src);
    // have we exceeded the configured limit of fs objects.
    fsn.checkFsObjectLimit();
    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
    final INodeFile file = fsn.checkLease(iip, clientName, fileId);
    BlockInfo lastBlockInFile = file.getLastBlock();
    if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
        // The block that the client claims is the current last block
        // doesn't match up with what we think is the last block. There are
        // four possibilities:
        // 1) This is the first block allocation of an append() pipeline
        //    which started appending exactly at or exceeding the block boundary.
        //    In this case, the client isn't passed the previous block,
        //    so it makes the allocateBlock() call with previous=null.
        //    We can distinguish this since the last block of the file
        //    will be exactly a full block.
        // 2) This is a retry from a client that missed the response of a
        //    prior getAdditionalBlock() call, perhaps because of a network
        //    timeout, or because of an HA failover. In that case, we know
        //    by the fact that the client is re-issuing the RPC that it
        //    never began to write to the old block. Hence it is safe to
        //    return the existing block.
        // 3) This is an entirely bogus request/bug -- we should error out
        //    rather than potentially appending a new block with an empty
        //    one in the middle, etc
        // 4) This is a retry from a client that timed out while
        //    the prior getAdditionalBlock() is still being processed,
        //    currently working on chooseTarget().
        //    There are no means to distinguish between the first and
        //    the second attempts in Part I, because the first one hasn't
        //    changed the namesystem state yet.
        //    We run this analysis again in Part II where case 4 is impossible.
        BlockInfo penultimateBlock = file.getPenultimateBlock();
        if (previous == null && lastBlockInFile != null
                && lastBlockInFile.getNumBytes() >= file.getPreferredBlockSize()
                && lastBlockInFile.isComplete()) {
            // Case 1
            if (NameNode.stateChangeLog.isDebugEnabled()) {
                NameNode.stateChangeLog.debug("BLOCK* NameSystem.allocateBlock: "
                        + "handling block allocation writing to a file with a "
                        + "complete previous block: src=" + src
                        + " lastBlock=" + lastBlockInFile);
            }
        } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
            if (lastBlockInFile.getNumBytes() != 0) {
                throw new IOException("Request looked like a retry to allocate block " + lastBlockInFile + " but it already contains " + lastBlockInFile.getNumBytes() + " bytes");
            }
            // Case 2
            // Return the last block.
            NameNode.stateChangeLog.info("BLOCK* allocateBlock: caught retry for "
                    + "allocation of a new block in " + src
                    + ". Returning previously allocated block " + lastBlockInFile);
            long offset = file.computeFileSize();
            BlockUnderConstructionFeature uc = lastBlockInFile.getUnderConstructionFeature();
            onRetryBlock[0] = makeLocatedBlock(fsn, lastBlockInFile, uc.getExpectedStorageLocations(), offset);
            return new FileState(file, src, iip);
        } else {
            // Case 3
            throw new IOException("Cannot allocate block in " + src + ": " + "passed 'previous' block " + previous + " does not match actual " + "last block in file " + lastBlockInFile);
        }
    }
    return new FileState(file, src, iip);
}
Also used : BlockUnderConstructionFeature(org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException)
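
One detail that makes Case 1 above work: ExtendedBlock.getLocalBlock is a null-safe static helper, so an append() that starts exactly on a block boundary (previous == null) simply yields previousBlock == null, and matchingIdAndGenStamp then reports a mismatch whenever the file already has a last block. A short illustration; the pool id and block numbers are made up:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class LocalBlockDemo {
    public static void main(String[] args) {
        // pool id, block id, length, generation stamp -- all illustrative
        ExtendedBlock previous = new ExtendedBlock(
                "BP-1234-10.0.0.1-1700000000000", 1073741825L, 134217728L, 1001L);
        Block local = ExtendedBlock.getLocalBlock(previous);
        System.out.println(local);                               // blk_1073741825_1001
        System.out.println(ExtendedBlock.getLocalBlock(null));   // null (boundary append)
    }
}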

Example 100 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class FSNamesystem method createNewBlock.

/**
   * Create new block with a unique block id and a new generation stamp.
   * @param blockType whether the file uses a striped or contiguous block layout
   */
Block createNewBlock(BlockType blockType) throws IOException {
    assert hasWriteLock();
    Block b = new Block(nextBlockId(blockType), 0, 0);
    // Increment the generation stamp for every new block.
    b.setGenerationStamp(nextGenerationStamp(false));
    return b;
}
Also used : LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
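
Outside the NameNode, the same two steps can be mirrored directly against the Block API; the id and generation stamp below are fixed stand-ins for nextBlockId()/nextGenerationStamp(), which require live namesystem state:

import org.apache.hadoop.hdfs.protocol.Block;

public class NewBlockDemo {
    public static void main(String[] args) {
        long blockId = 1073741826L; // stand-in for nextBlockId(blockType)
        long genStamp = 1002L;      // stand-in for nextGenerationStamp(false)
        Block b = new Block(blockId, 0, 0); // new blocks start with zero bytes
        b.setGenerationStamp(genStamp);
        System.out.println(b);      // blk_1073741826_1002
    }
}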

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79
Test (org.junit.Test): 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74
Path (org.apache.hadoop.fs.Path): 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26
IOException (java.io.IOException): 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22
Configuration (org.apache.hadoop.conf.Configuration): 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14
ArrayList (java.util.ArrayList): 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10