Example 16 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSDirAppendOp method computeQuotaDeltaForUCBlock.

/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn, INodeFile file) {
    final QuotaCounts delta = new QuotaCounts.Builder().build();
    final BlockInfo lastBlock = file.getLastBlock();
    if (lastBlock != null) {
        final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
        final short repl = lastBlock.getReplication();
        delta.addStorageSpace(diff * repl);
        final BlockStoragePolicy policy = fsn.getFSDirectory()
                .getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
        List<StorageType> types = policy.chooseStorageTypes(repl);
        for (StorageType t : types) {
            if (t.supportTypeQuota()) {
                delta.addTypeSpace(t, diff);
            }
        }
    }
    return delta;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
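
The arithmetic above is easy to check by hand. The sketch below (plain Java, not Hadoop code; the block size, last-block length, and replication factor are hypothetical) reproduces the storage-space part of the delta: while the last block is under construction, quota is charged as if it may grow to the full preferred block size on every replica.

public class UcBlockQuotaSketch {
    public static void main(String[] args) {
        // Hypothetical values chosen for illustration only.
        long preferredBlockSize = 128L * 1024 * 1024; // 128 MB, a common default
        long lastBlockNumBytes = 100L * 1024 * 1024;  // partial last block
        short replication = 3;

        // Same formula as computeQuotaDeltaForUCBlock: charge the remaining
        // headroom of the block once per replica.
        long diff = preferredBlockSize - lastBlockNumBytes;
        long storageSpaceDelta = diff * replication;
        System.out.println("storage space delta = " + storageSpaceDelta); // 88080384

        // Type quota (the loop over chooseStorageTypes) charges diff once per
        // storage type that supports type quotas, not multiplied by replication.
    }
}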

Example 17 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSDirAppendOp method appendFile.

/**
   * Append to an existing file.
   * <p>
   *
   * The method returns the last block of the file if this is a partial block,
   * which can still be used for writing more data. The client uses the
   * returned block locations to form the data pipeline for this block.<br>
   * The {@link LocatedBlock} will be null if the last block is full.
   * The client then allocates a new block with the next call using
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
   * <p>
   *
   * For description of parameters and exceptions thrown see
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
   *
   * @param fsn namespace
   * @param srcArg path name
   * @param pc permission checker to check fs permission
   * @param holder client name
   * @param clientMachine client machine info
   * @param newBlock whether to append the data to a new block
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *                      rebuilding
   * @return the last block with status
   */
static LastBlockWithStatus appendFile(final FSNamesystem fsn, final String srcArg,
        final FSPermissionChecker pc, final String holder, final String clientMachine,
        final boolean newBlock, final boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();
    final LocatedBlock lb;
    final FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath iip;
    fsd.writeLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
        // Verify that the destination does not exist as a directory already
        final INode inode = iip.getLastINode();
        final String path = iip.getPath();
        if (inode != null && inode.isDirectory()) {
            throw new FileAlreadyExistsException("Cannot append to directory " + path + "; already exists as a directory.");
        }
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        if (inode == null) {
            throw new FileNotFoundException("Failed to append to non-existent file " + path + " for client " + clientMachine);
        }
        final INodeFile file = INodeFile.valueOf(inode, path, true);
        // Appending to a file with striped blocks is not supported.
        if (file.isStriped()) {
            throw new UnsupportedOperationException("Cannot append to files with striped block " + path);
        }
        BlockManager blockManager = fsd.getBlockManager();
        final BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
        if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
            throw new UnsupportedOperationException("Cannot append to lazy persist file " + path);
        }
        // Opening an existing file for append - may need to recover lease.
        fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder, clientMachine, false);
        final BlockInfo lastBlock = file.getLastBlock();
        // Check that the block has at least minimum replication.
        if (lastBlock != null) {
            if (lastBlock.getBlockUCState() == BlockUCState.COMMITTED) {
                throw new RetriableException(new NotReplicatedYetException(
                        "append: lastBlock=" + lastBlock + " of src=" + path
                        + " is COMMITTED but not yet COMPLETE."));
            } else if (lastBlock.isComplete()
                    && !blockManager.isSufficientlyReplicated(lastBlock)) {
                throw new IOException("append: lastBlock=" + lastBlock + " of src="
                        + path + " is not sufficiently replicated yet.");
            }
        }
        lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock, true, logRetryCache);
    } catch (IOException ie) {
        NameNode.stateChangeLog.warn("DIR* NameSystem.append: " + ie.getMessage());
        throw ie;
    } finally {
        fsd.writeUnlock();
    }
    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
    if (lb != null) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file {} for {} at {} block {} block" + " size {}", srcArg, holder, clientMachine, lb.getBlock(), lb.getBlock().getNumBytes());
    }
    return new LastBlockWithStatus(lb, stat);
}
Also used : FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) LastBlockWithStatus(org.apache.hadoop.hdfs.protocol.LastBlockWithStatus) FileNotFoundException(java.io.FileNotFoundException) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) RetriableException(org.apache.hadoop.ipc.RetriableException)
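
For context, this NameNode-side logic is what a client triggers through the public FileSystem API. A minimal sketch, assuming a running HDFS cluster reachable via fs.defaultFS and a hypothetical pre-existing file path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path file = new Path("/tmp/append-demo.txt"); // hypothetical path
            // The NameNode runs FSDirAppendOp.appendFile(): if the last block
            // is partial, its locations come back and the client writes into
            // it; if it is full, the LocatedBlock is null and the client asks
            // for a new block via ClientProtocol#addBlock.
            try (FSDataOutputStream out = fs.append(file)) {
                out.writeBytes("more data\n");
            }
        }
    }
}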

Example 18 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSDirTruncateOp method prepareFileForTruncate.

/**
   * Convert current INode to UnderConstruction. Recreate lease. Create new
   * block for the truncated copy. Schedule truncation of the replicas.
   *
   * @param fsn namespace
   * @param iip inodes in the path containing the file
   * @param leaseHolder lease holder
   * @param clientMachine client machine info
   * @param lastBlockDelta last block delta size
   * @param newBlock new block
   * @return the returned block will be written to editLog and passed back
   *         into this method upon loading.
   * @throws IOException
   */
@VisibleForTesting
static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip,
        String leaseHolder, String clientMachine, long lastBlockDelta,
        Block newBlock) throws IOException {
    assert fsn.hasWriteLock();
    INodeFile file = iip.getLastINode().asFile();
    assert !file.isStriped();
    file.recordModification(iip.getLatestSnapshotId());
    file.toUnderConstruction(leaseHolder, clientMachine);
    assert file.isUnderConstruction() : "inode should be under construction.";
    fsn.getLeaseManager().addLease(file.getFileUnderConstructionFeature().getClientName(), file.getId());
    boolean shouldRecoverNow = (newBlock == null);
    BlockInfo oldBlock = file.getLastBlock();
    boolean shouldCopyOnTruncate = shouldCopyOnTruncate(fsn, file, oldBlock);
    if (newBlock == null) {
        newBlock = (shouldCopyOnTruncate) ? fsn.createNewBlock(BlockType.CONTIGUOUS)
                : new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),
                        fsn.nextGenerationStamp(
                                fsn.getBlockManager().isLegacyBlock(oldBlock)));
    }
    final BlockInfo truncatedBlockUC;
    BlockManager blockManager = fsn.getFSDirectory().getBlockManager();
    if (shouldCopyOnTruncate) {
        // Add new truncateBlock into blocksMap and
        // use oldBlock as a source for copy-on-truncate recovery
        truncatedBlockUC = new BlockInfoContiguous(newBlock,
                file.getPreferredBlockReplication());
        truncatedBlockUC.convertToBlockUnderConstruction(
                BlockUCState.UNDER_CONSTRUCTION, blockManager.getStorages(oldBlock));
        truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
        truncatedBlockUC.getUnderConstructionFeature().setTruncateBlock(oldBlock);
        file.setLastBlock(truncatedBlockUC);
        blockManager.addBlockCollection(truncatedBlockUC, file);
        NameNode.stateChangeLog.debug(
                "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new size {} new block {} old block {}",
                truncatedBlockUC.getNumBytes(), newBlock, oldBlock);
    } else {
        // Use new generation stamp for in-place truncate recovery
        blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
        oldBlock = file.getLastBlock();
        assert !oldBlock.isComplete() : "oldBlock should be under construction";
        BlockUnderConstructionFeature uc = oldBlock.getUnderConstructionFeature();
        uc.setTruncateBlock(new Block(oldBlock));
        uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
        uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp());
        truncatedBlockUC = oldBlock;
        NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: " + "{} Scheduling in-place block truncate to new size {}", uc, uc.getTruncateBlock().getNumBytes());
    }
    if (shouldRecoverNow) {
        truncatedBlockUC.getUnderConstructionFeature().initializeBlockRecovery(
                truncatedBlockUC, newBlock.getGenerationStamp());
    }
    return newBlock;
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockUnderConstructionFeature(org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) Block(org.apache.hadoop.hdfs.protocol.Block) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
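
On the client side, this method sits behind FileSystem#truncate, whose boolean result distinguishes the immediate case from the recovery case handled above. A minimal sketch, assuming a running cluster and a hypothetical file:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path file = new Path("/tmp/truncate-demo.txt"); // hypothetical path
            long newLength = 42L; // hypothetical mid-block length
            // true: the new length fell on a block boundary and the truncate
            // finished immediately; false: prepareFileForTruncate scheduled
            // block recovery and the file stays under construction meanwhile.
            boolean done = fs.truncate(file, newLength);
            System.out.println(done ? "truncate complete" : "recovery scheduled");
        }
    }
}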

Example 19 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSDirWriteFileOp method saveAllocatedBlock.

/**
   * Save allocated block at the given pending filename
   *
   * @param fsn FSNamesystem
   * @param src path to the file
   * @param inodesInPath representing each of the components of src.
   *                     The last INode is the INode for {@code src} file.
   * @param newBlock newly allocated block to be saved
   * @param targets target datanodes where replicas of the new block are placed
   * @param blockType type of the new block (contiguous or striped)
   * @throws QuotaExceededException If addition of block exceeds space quota
   */
private static void saveAllocatedBlock(FSNamesystem fsn, String src,
        INodesInPath inodesInPath, Block newBlock, DatanodeStorageInfo[] targets,
        BlockType blockType) throws IOException {
    assert fsn.hasWriteLock();
    BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock, targets, blockType);
    logAllocatedBlock(src, b);
    DatanodeStorageInfo.incrementBlocksScheduled(targets);
}
Also used : BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)

Example 20 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSDirWriteFileOp method storeAllocatedBlock.

/**
   * Part II of getAdditionalBlock().
   * Should repeat the same analysis of the file state as in Part I,
   * but under the write lock.
   * If the conditions still hold, then allocate a new block with
   * the new targets, add it to the INode and to the BlocksMap.
   */
static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src, long fileId,
        String clientName, ExtendedBlock previous, DatanodeStorageInfo[] targets)
        throws IOException {
    long offset;
    // Run the full analysis again, since things could have changed
    // while chooseTarget() was executing.
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
    FileState fileState = analyzeFileState(fsn, iip, fileId, clientName, previous, onRetryBlock);
    final INodeFile pendingFile = fileState.inode;
    src = fileState.path;
    if (onRetryBlock[0] != null) {
        if (onRetryBlock[0].getLocations().length > 0) {
            // This is a retry. Just return the last block if it has locations.
            return onRetryBlock[0];
        } else {
            // add new chosen targets to already allocated block and return
            BlockInfo lastBlockInFile = pendingFile.getLastBlock();
            lastBlockInFile.getUnderConstructionFeature().setExpectedLocations(
                    lastBlockInFile, targets, pendingFile.getBlockType());
            offset = pendingFile.computeFileSize();
            return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
        }
    }
    // commit the last block and complete it if it has minimum replicas
    fsn.commitOrCompleteLastBlock(pendingFile, fileState.iip, ExtendedBlock.getLocalBlock(previous));
    // allocate new block, record block locations in INode.
    final BlockType blockType = pendingFile.getBlockType();
    Block newBlock = fsn.createNewBlock(blockType);
    INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
    saveAllocatedBlock(fsn, src, inodesInPath, newBlock, targets, blockType);
    persistNewBlock(fsn, src, pendingFile);
    offset = pendingFile.computeFileSize();
    // Return located block
    return makeLocatedBlock(fsn, fsn.getStoredBlock(newBlock), targets, offset);
}
Also used : BlockType(org.apache.hadoop.hdfs.protocol.BlockType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
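
The shape of this method (choose targets outside the lock, then repeat the analysis under the write lock before mutating the namespace) generalizes beyond HDFS. A simplified, non-Hadoop sketch of that two-phase pattern, with a version counter standing in for the file state that gets checked twice:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class TwoPhaseAllocator {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private long version; // stands in for the file state analyzed in both parts

    long allocate(long versionSeenInPartOne) {
        // Part I ran earlier without the write lock (expensive target choice).
        lock.writeLock().lock();
        try {
            // Part II: repeat the analysis; state may have changed meanwhile.
            if (version != versionSeenInPartOne) {
                throw new IllegalStateException(
                        "state changed while choosing targets; caller retries");
            }
            version++; // conditions still hold: commit the new block
            return version;
        } finally {
            lock.writeLock().unlock();
        }
    }
}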

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)84 Test (org.junit.Test)28 Path (org.apache.hadoop.fs.Path)27 Block (org.apache.hadoop.hdfs.protocol.Block)22 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)19 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)16 INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)14 BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager)13 IOException (java.io.IOException)11 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)11 BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous)11 BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped)11 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)10 DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo)7 Configuration (org.apache.hadoop.conf.Configuration)6 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)6 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)6 BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)5 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)5 LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)5