
Example 6 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class INodeFile method isBlockInLatestSnapshot.

/**
   * @return true if the block is contained in the latest snapshot,
   *         false otherwise.
   */
boolean isBlockInLatestSnapshot(BlockInfo block) {
    FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
    if (sf == null || sf.getDiffs() == null) {
        // No snapshot state has been recorded for this file.
        return false;
    }
    BlockInfo[] snapshotBlocks = getDiffs().findEarlierSnapshotBlocks(getDiffs().getLastSnapshotId());
    return snapshotBlocks != null && Arrays.asList(snapshotBlocks).contains(block);
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
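
The method reduces to a null guard plus a linear membership test over the latest snapshot's block array. Below is a minimal, self-contained sketch of that pattern with block identity reduced to a Long id; the class and method names are illustrative stand-ins, not Hadoop APIs.

import java.util.Arrays;

public class SnapshotMembershipSketch {

    static boolean isBlockInLatestSnapshot(Long blockId, Long[] snapshotBlockIds) {
        // Mirror the null guard in INodeFile: no snapshot state means "not in a snapshot".
        if (snapshotBlockIds == null) {
            return false;
        }
        // Same linear membership test as Arrays.asList(snapshotBlocks).contains(block).
        return Arrays.asList(snapshotBlockIds).contains(blockId);
    }

    public static void main(String[] args) {
        Long[] snapshot = { 1001L, 1002L };
        System.out.println(isBlockInLatestSnapshot(1001L, snapshot)); // true
        System.out.println(isBlockInLatestSnapshot(1003L, snapshot)); // false
        System.out.println(isBlockInLatestSnapshot(1003L, null));     // false
    }
}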

Example 7 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSEditLogLoader method updateBlocks.

/**
   * Update in-memory data structures with new block information.
   * @throws IOException if the op's block list is inconsistent with the
   *         file's current block list
   */
private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, INodesInPath iip, INodeFile file, ErasureCodingPolicy ecPolicy) throws IOException {
    // Update its block list
    BlockInfo[] oldBlocks = file.getBlocks();
    Block[] newBlocks = op.getBlocks();
    String path = op.getPath();
    // Are we only updating the last block's gen stamp?
    boolean isGenStampUpdate = oldBlocks.length == newBlocks.length;
    // First, update blocks in common
    for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
        BlockInfo oldBlock = oldBlocks[i];
        Block newBlock = newBlocks[i];
        boolean isLastBlock = i == newBlocks.length - 1;
        if (oldBlock.getBlockId() != newBlock.getBlockId() || (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp() && !(isGenStampUpdate && isLastBlock))) {
            throw new IOException("Mismatched block IDs or generation stamps, " + "attempting to replace block " + oldBlock + " with " + newBlock + " as block # " + i + "/" + newBlocks.length + " of " + path);
        }
        oldBlock.setNumBytes(newBlock.getNumBytes());
        boolean changeMade = oldBlock.getGenerationStamp() != newBlock.getGenerationStamp();
        oldBlock.setGenerationStamp(newBlock.getGenerationStamp());
        if (!oldBlock.isComplete() && (!isLastBlock || op.shouldCompleteLastBlock())) {
            changeMade = true;
            fsNamesys.getBlockManager().forceCompleteBlock(oldBlock);
        }
        if (changeMade) {
            // The state or gen-stamp of the block has changed. So, we may be
            // able to process some messages from datanodes that we previously
            // were unable to process.
            fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
        }
    }
    if (newBlocks.length < oldBlocks.length) {
        // We're removing a block from the file, e.g. abandonBlock(...)
        if (!file.isUnderConstruction()) {
            throw new IOException("Trying to remove a block from file " + path + " which is not under construction.");
        }
        if (newBlocks.length != oldBlocks.length - 1) {
            throw new IOException("Trying to remove more than one block from file " + path);
        }
        Block oldBlock = oldBlocks[oldBlocks.length - 1];
        boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(fsDir, path, iip, file, oldBlock);
        if (!removed && !(op instanceof UpdateBlocksOp)) {
            throw new IOException("Trying to delete non-existant block " + oldBlock);
        }
    } else if (newBlocks.length > oldBlocks.length) {
        final boolean isStriped = ecPolicy != null;
        // We're adding blocks
        for (int i = oldBlocks.length; i < newBlocks.length; i++) {
            Block newBlock = newBlocks[i];
            final BlockInfo newBI;
            if (!op.shouldCompleteLastBlock()) {
                // TODO: shouldn't this only be true for the last block?
                // What about an old-version fsync() where fsync isn't called
                // until several blocks in?
                if (isStriped) {
                    newBI = new BlockInfoStriped(newBlock, ecPolicy);
                } else {
                    newBI = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
                }
                newBI.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
            } else {
                // OP_CLOSE should add finalized blocks. This code path is
                // only executed when loading edits written by prior versions
                // of Hadoop. Current versions always log OP_ADD operations
                // as each block is allocated.
                if (isStriped) {
                    newBI = new BlockInfoStriped(newBlock, ecPolicy);
                } else {
                    newBI = new BlockInfoContiguous(newBlock, file.getFileReplication());
                }
            }
            fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBI, file);
            file.addBlock(newBI);
            fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
        }
    }
}
Also used: BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), UpdateBlocksOp (org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException)
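
The invariant enforced in the common-blocks loop is worth isolating: block IDs must always match, and generation stamps must match unless the edit is a gen-stamp-only update and the block under inspection is the last one. Here is a minimal sketch of just that check; Blk and checkMatch are hypothetical stand-ins, not Hadoop types.

import java.io.IOException;

public class BlockUpdateCheckSketch {

    // Hypothetical stand-in for org.apache.hadoop.hdfs.protocol.Block.
    static class Blk {
        final long id;
        final long genStamp;
        Blk(long id, long genStamp) { this.id = id; this.genStamp = genStamp; }
    }

    static void checkMatch(Blk oldBlock, Blk newBlock,
                           boolean isGenStampUpdate, boolean isLastBlock)
            throws IOException {
        boolean idMismatch = oldBlock.id != newBlock.id;
        // A gen-stamp change is tolerated only on the last block of a
        // gen-stamp-only update; everywhere else it is an inconsistency.
        boolean gsMismatch = oldBlock.genStamp != newBlock.genStamp
                && !(isGenStampUpdate && isLastBlock);
        if (idMismatch || gsMismatch) {
            throw new IOException("Mismatched block IDs or generation stamps");
        }
    }

    public static void main(String[] args) throws IOException {
        // Same ID, bumped gen stamp on the last block of a gen-stamp update: accepted.
        checkMatch(new Blk(1, 5), new Blk(1, 6), true, true);
        // The same bump on a middle block is rejected.
        try {
            checkMatch(new Blk(1, 5), new Blk(1, 6), true, false);
        } catch (IOException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}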

Example 8 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSEditLogLoader method addNewBlock.

/**
   * Add a new block to the given INodeFile.
   */
private void addNewBlock(AddBlockOp op, INodeFile file, ErasureCodingPolicy ecPolicy) throws IOException {
    BlockInfo[] oldBlocks = file.getBlocks();
    Block pBlock = op.getPenultimateBlock();
    Block newBlock = op.getLastBlock();
    if (pBlock != null) {
        // the penultimate block is not null
        assert oldBlocks != null && oldBlocks.length > 0;
        // compare pBlock with the last block of oldBlocks
        BlockInfo oldLastBlock = oldBlocks[oldBlocks.length - 1];
        if (oldLastBlock.getBlockId() != pBlock.getBlockId() || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
            throw new IOException("Mismatched block IDs or generation stamps for the old last block of file " + op.getPath() + ", the old last block is " + oldLastBlock + ", and the block read from editlog is " + pBlock);
        }
        oldLastBlock.setNumBytes(pBlock.getNumBytes());
        if (!oldLastBlock.isComplete()) {
            fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);
            fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
        }
    } else {
        // the penultimate block is null
        Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
    }
    // add the new block
    final BlockInfo newBlockInfo;
    boolean isStriped = ecPolicy != null;
    if (isStriped) {
        newBlockInfo = new BlockInfoStriped(newBlock, ecPolicy);
    } else {
        newBlockInfo = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
    }
    newBlockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
    fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBlockInfo, file);
    file.addBlock(newBlockInfo);
    fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
Also used: BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException)
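
The penultimate-block reconciliation has a simple shape: an edit that carries no penultimate block may only apply to a file with no blocks, while one that does must match the file's current last block by both ID and generation stamp. A self-contained sketch under those assumptions follows; Blk and checkPenultimate are hypothetical names, not Hadoop types.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class AddBlockCheckSketch {

    // Hypothetical stand-in for a block: only ID and generation stamp matter here.
    static class Blk {
        final long id;
        final long genStamp;
        Blk(long id, long genStamp) { this.id = id; this.genStamp = genStamp; }
    }

    static void checkPenultimate(List<Blk> fileBlocks, Blk pBlock) throws IOException {
        if (pBlock == null) {
            // First block of the file: nothing to reconcile, but the file
            // must not already have blocks.
            if (!fileBlocks.isEmpty()) {
                throw new IllegalStateException("expected an empty block list");
            }
            return;
        }
        // The logged penultimate block must match the file's current last block.
        Blk oldLast = fileBlocks.get(fileBlocks.size() - 1);
        if (oldLast.id != pBlock.id || oldLast.genStamp != pBlock.genStamp) {
            throw new IOException("Mismatched block IDs or generation stamps");
        }
    }

    public static void main(String[] args) throws IOException {
        List<Blk> blocks = new ArrayList<>();
        checkPenultimate(blocks, null);          // empty file, no penultimate: OK
        blocks.add(new Blk(1, 1));
        checkPenultimate(blocks, new Blk(1, 1)); // matches the last block: OK
        System.out.println("both checks passed");
    }
}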

Example 9 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSNamesystem method commitBlockSynchronization.

void commitBlockSynchronization(ExtendedBlock oldBlock, long newgenerationstamp, long newlength, boolean closeFile, boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages) throws IOException {
    LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", newgenerationstamp=" + newgenerationstamp + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ", closeFile=" + closeFile + ", deleteBlock=" + deleteblock + ")");
    checkOperation(OperationCategory.WRITE);
    final String src;
    writeLock();
    boolean copyTruncate = false;
    BlockInfo truncatedBlock = null;
    try {
        checkOperation(OperationCategory.WRITE);
        // If a DN tries to commit to the standby, the recovery will
        // fail, and the next retry will succeed on the new NN.
        checkNameNodeSafeMode("Cannot commitBlockSynchronization while in safe mode");
        final BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(oldBlock));
        if (storedBlock == null) {
            if (deleteblock) {
                // This may be a retry attempt, so ignore the failure
                // to locate the block.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Block (=" + oldBlock + ") not found");
                }
                return;
            } else {
                throw new IOException("Block (=" + oldBlock + ") not found");
            }
        }
        final long oldGenerationStamp = storedBlock.getGenerationStamp();
        final long oldNumBytes = storedBlock.getNumBytes();
        // Deletion removes the file path from the namespace first and defers
        // block removal, so a stored block can briefly outlive its file;
        // reject the commit here in that case (see HDFS-6825).
        if (storedBlock.isDeleted()) {
            throw new IOException("The blockCollection of " + storedBlock + " is null, likely because the file owning this block was" + " deleted and the block removal is delayed");
        }
        final INodeFile iFile = getBlockCollection(storedBlock);
        src = iFile.getFullPathName();
        if (isFileDeleted(iFile)) {
            throw new FileNotFoundException("File not found: " + src + ", likely due to delayed block removal");
        }
        if ((!iFile.isUnderConstruction() || storedBlock.isComplete()) && iFile.getLastBlock().isComplete()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Unexpected block (=" + oldBlock + ") since the file (=" + iFile.getLocalName() + ") is not under construction");
            }
            return;
        }
        truncatedBlock = iFile.getLastBlock();
        final long recoveryId = truncatedBlock.getUnderConstructionFeature().getBlockRecoveryId();
        copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
        if (recoveryId != newgenerationstamp) {
            throw new IOException("The recovery id " + newgenerationstamp + " does not match current recovery id " + recoveryId + " for block " + oldBlock);
        }
        if (deleteblock) {
            Block blockToDel = ExtendedBlock.getLocalBlock(oldBlock);
            boolean remove = iFile.removeLastBlock(blockToDel) != null;
            if (remove) {
                blockManager.removeBlock(storedBlock);
            }
        } else {
            // update last block
            if (!copyTruncate) {
                storedBlock.setGenerationStamp(newgenerationstamp);
                storedBlock.setNumBytes(newlength);
            }
            // Find the target DatanodeStorageInfos. If not found because of invalid
            // or empty DatanodeID/StorageID, the slot of same offset in dsInfos is
            // null
            final DatanodeStorageInfo[] dsInfos = blockManager.getDatanodeManager().getDatanodeStorageInfos(newtargets, newtargetstorages, "src=%s, oldBlock=%s, newgenerationstamp=%d, newlength=%d", src, oldBlock, newgenerationstamp, newlength);
            if (closeFile && dsInfos != null) {
                // The file is getting closed. Insert block locations into
                // blockManager; otherwise fsck will report these blocks as
                // MISSING, especially if the blocksReceived from Datanodes
                // take a long time to arrive.
                for (int i = 0; i < dsInfos.length; i++) {
                    if (dsInfos[i] != null) {
                        if (copyTruncate) {
                            dsInfos[i].addBlock(truncatedBlock, truncatedBlock);
                        } else {
                            Block bi = new Block(storedBlock);
                            if (storedBlock.isStriped()) {
                                bi.setBlockId(bi.getBlockId() + i);
                            }
                            dsInfos[i].addBlock(storedBlock, bi);
                        }
                    }
                }
            }
            // add pipeline locations into the INodeUnderConstruction
            if (copyTruncate) {
                iFile.convertLastBlockToUC(truncatedBlock, dsInfos);
            } else {
                iFile.convertLastBlockToUC(storedBlock, dsInfos);
                if (closeFile) {
                    blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(), storedBlock, oldGenerationStamp, oldNumBytes, dsInfos);
                }
            }
        }
        if (closeFile) {
            if (copyTruncate) {
                closeFileCommitBlocks(src, iFile, truncatedBlock);
                if (!iFile.isBlockInLatestSnapshot(storedBlock)) {
                    blockManager.removeBlock(storedBlock);
                }
            } else {
                closeFileCommitBlocks(src, iFile, storedBlock);
            }
        } else {
            // If this commit does not want to close the file, persist blocks
            FSDirWriteFileOp.persistBlocks(dir, src, iFile, false);
        }
    } finally {
        writeUnlock("commitBlockSynchronization");
    }
    getEditLog().logSync();
    if (closeFile) {
        LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", file=" + src + (copyTruncate ? ", newBlock=" + truncatedBlock : ", newgenerationstamp=" + newgenerationstamp) + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ") successful");
    } else {
        LOG.info("commitBlockSynchronization(" + oldBlock + ") successful");
    }
}
Also used: DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileNotFoundException (java.io.FileNotFoundException), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), IOException (java.io.IOException)
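
Two guards above are easy to miss: the commit must carry the generation stamp issued when recovery started, and copy-on-truncate is detected purely by a block-ID mismatch between the file's last block and the recovered block. A small sketch of both checks, with plain longs standing in for BlockInfo and all names illustrative rather than Hadoop APIs:

import java.io.IOException;

public class CommitSyncChecksSketch {

    // Mirrors the recovery-id guard: the commit must carry the generation
    // stamp that was issued when block recovery started.
    static void checkRecoveryId(long recoveryId, long newGenerationStamp)
            throws IOException {
        if (recoveryId != newGenerationStamp) {
            throw new IOException("The recovery id " + newGenerationStamp
                    + " does not match current recovery id " + recoveryId);
        }
    }

    // In-place truncate reuses the block ID; copy-on-truncate (a snapshot
    // still references the old data) allocates a new one, so the IDs diverge.
    static boolean isCopyTruncate(long lastBlockId, long storedBlockId) {
        return lastBlockId != storedBlockId;
    }

    public static void main(String[] args) throws IOException {
        checkRecoveryId(42L, 42L);                        // matching ids: accepted
        System.out.println(isCopyTruncate(2001L, 2001L)); // false: in-place truncate
        System.out.println(isCopyTruncate(2002L, 2001L)); // true: copy-on-truncate
    }
}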

Example 10 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

the class FSNamesystem method internalReleaseLease.

/**
   * Move a file that is being written into an immutable, closed state.
   * @param src The filename
   * @param lease The lease for the client creating the file
   * @param recoveryLeaseHolder reassign lease to this holder if the last block
   *        needs recovery; keep current holder if null.
   * @throws AlreadyBeingCreatedException if file is waiting to achieve minimal
   *         replication;<br>
   *         RecoveryInProgressException if lease recovery is in progress.<br>
   *         IOException in case of an error.
   * @return true if the file has been successfully finalized and closed, or
   *         false if block recovery has been initiated. Since the lease owner
   *         has been changed and logged, the caller should call logSync().
   */
boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, String recoveryLeaseHolder) throws IOException {
    LOG.info("Recovering " + lease + ", src=" + src);
    assert !isInSafeMode();
    assert hasWriteLock();
    final INodeFile pendingFile = iip.getLastINode().asFile();
    int nrBlocks = pendingFile.numBlocks();
    BlockInfo[] blocks = pendingFile.getBlocks();
    int nrCompleteBlocks;
    BlockInfo curBlock = null;
    for (nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
        curBlock = blocks[nrCompleteBlocks];
        if (!curBlock.isComplete())
            break;
        assert blockManager.hasMinStorage(curBlock) : "A COMPLETE block is not minimally replicated in " + src;
    }
    // If there are no incomplete blocks associated with the expired lease,
    // then reap the lease immediately and close the file.
    if (nrCompleteBlocks == nrBlocks) {
        finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshotId(), false);
        NameNode.stateChangeLog.warn("BLOCK*" + " internalReleaseLease: All existing blocks are COMPLETE," + " lease removed, file " + src + " closed.");
        // closed!
        return true;
    }
    // Only the last and the penultimate blocks may be in a non-COMPLETE state.
    // If the penultimate block is not COMPLETE, then it must be COMMITTED.
    if (nrCompleteBlocks < nrBlocks - 2 || nrCompleteBlocks == nrBlocks - 2 && curBlock != null && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
        final String message = "DIR* NameSystem.internalReleaseLease: " + "attempt to release a create lock on " + src + " but file is already closed.";
        NameNode.stateChangeLog.warn(message);
        throw new IOException(message);
    }
    // The last block is not COMPLETE, and the penultimate block, if it
    // exists, is either COMPLETE or COMMITTED.
    final BlockInfo lastBlock = pendingFile.getLastBlock();
    BlockUCState lastBlockState = lastBlock.getBlockUCState();
    BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
    // If the penultimate block doesn't exist, its minimum replication is
    // trivially met.
    boolean penultimateBlockMinStorage = penultimateBlock == null || blockManager.hasMinStorage(penultimateBlock);
    switch(lastBlockState) {
        case COMPLETE:
            assert false : "Already checked that the last block is incomplete";
            break;
        case COMMITTED:
            // Close file if committed blocks are minimally replicated
            if (penultimateBlockMinStorage && blockManager.hasMinStorage(lastBlock)) {
                finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshotId(), false);
                NameNode.stateChangeLog.warn("BLOCK*" + " internalReleaseLease: Committed blocks are minimally" + " replicated, lease removed, file" + src + " closed.");
                // closed!
                return true;
            }
            // Cannot close file right now, since some blocks 
            // are not yet minimally replicated.
            // This may potentially cause infinite loop in lease recovery
            // if there are no valid replicas on data-nodes.
            String message = "DIR* NameSystem.internalReleaseLease: " + "Failed to release lease for file " + src + ". Committed blocks are waiting to be minimally replicated." + " Try again later.";
            NameNode.stateChangeLog.warn(message);
            throw new AlreadyBeingCreatedException(message);
        case UNDER_CONSTRUCTION:
        case UNDER_RECOVERY:
            BlockUnderConstructionFeature uc = lastBlock.getUnderConstructionFeature();
            // determine if last block was intended to be truncated
            Block recoveryBlock = uc.getTruncateBlock();
            boolean truncateRecovery = recoveryBlock != null;
            boolean copyOnTruncate = truncateRecovery && recoveryBlock.getBlockId() != lastBlock.getBlockId();
            assert !copyOnTruncate || recoveryBlock.getBlockId() < lastBlock.getBlockId() && recoveryBlock.getGenerationStamp() < lastBlock.getGenerationStamp() && recoveryBlock.getNumBytes() > lastBlock.getNumBytes() : "wrong recoveryBlock";
            // Set up the last block's locations from the blockManager if not already known.
            if (uc.getNumExpectedLocations() == 0) {
                uc.setExpectedLocations(lastBlock, blockManager.getStorages(lastBlock), lastBlock.getBlockType());
            }
            if (uc.getNumExpectedLocations() == 0 && lastBlock.getNumBytes() == 0) {
                // No datanode has reported in for this block; the client may
                // have crashed before writing any data to the pipeline.
                // This block doesn't need any recovery, so remove it and
                // close the file.
                pendingFile.removeLastBlock(lastBlock);
                finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshotId(), false);
                NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: " + "Removed empty last block and closed file " + src);
                return true;
            }
            // start recovery of the last block for this file
            long blockRecoveryId = nextGenerationStamp(blockManager.isLegacyBlock(lastBlock));
            lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
            if (copyOnTruncate) {
                lastBlock.setGenerationStamp(blockRecoveryId);
            } else if (truncateRecovery) {
                recoveryBlock.setGenerationStamp(blockRecoveryId);
            }
            uc.initializeBlockRecovery(lastBlock, blockRecoveryId);
            leaseManager.renewLease(lease);
            // Cannot close file right now, since the last block requires recovery.
            // This may potentially cause infinite loop in lease recovery
            // if there are no valid replicas on data-nodes.
            NameNode.stateChangeLog.warn("DIR* NameSystem.internalReleaseLease: " + "File " + src + " has not been closed." + " Lease recovery is in progress. " + "RecoveryId = " + blockRecoveryId + " for block " + lastBlock);
            break;
    }
    return false;
}
Also used: AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException), BlockUCState (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState), BlockUnderConstructionFeature (org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), IOException (java.io.IOException)
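
The switch above is essentially a state machine over the last block's BlockUCState: COMMITTED with minimally replicated blocks closes the file, COMMITTED without them fails fast, and an under-construction or under-recovery last block starts recovery and leaves the file open. A compact sketch of that decision follows; UCState and releaseLease are illustrative stand-ins, not Hadoop APIs.

public class LeaseRecoverySketch {

    // Illustrative mirror of HdfsServerConstants.BlockUCState.
    enum UCState { COMPLETE, COMMITTED, UNDER_CONSTRUCTION, UNDER_RECOVERY }

    /**
     * @return true if the file can be closed now, false if block recovery
     *         was initiated and the caller must wait.
     */
    static boolean releaseLease(UCState lastBlockState, boolean minimallyReplicated) {
        switch (lastBlockState) {
            case COMPLETE:
                // internalReleaseLease handles the all-COMPLETE case before
                // ever reaching the switch.
                throw new AssertionError("already handled before the switch");
            case COMMITTED:
                if (minimallyReplicated) {
                    return true; // enough replicas: close the file immediately
                }
                // Not enough replicas yet; the caller should retry later.
                throw new IllegalStateException(
                        "committed blocks are waiting to be minimally replicated");
            case UNDER_CONSTRUCTION:
            case UNDER_RECOVERY:
            default:
                // Start recovery of the last block; the file stays open.
                return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(releaseLease(UCState.COMMITTED, true));      // true
        System.out.println(releaseLease(UCState.UNDER_RECOVERY, true)); // false
    }
}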

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84 uses
Test (org.junit.Test): 28 uses
Path (org.apache.hadoop.fs.Path): 27 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 22 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16 uses
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13 uses
IOException (java.io.IOException): 11 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11 uses
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11 uses
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5 uses