Example 1 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class FSNamesystem, method updatePipelineInternal:

private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock, ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs, boolean logRetryCache) throws IOException {
    assert hasWriteLock();
    // check the validity of the block and lease holder name
    final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
    final String src = pendingFile.getFullPathName();
    final BlockInfo lastBlock = pendingFile.getLastBlock();
    assert !lastBlock.isComplete();
    // check new GS & length: this is not expected
    if (newBlock.getGenerationStamp() <= lastBlock.getGenerationStamp()) {
        final String msg = "Update " + oldBlock + " but the new block " + newBlock + " does not have a larger generation stamp than the last block " + lastBlock;
        LOG.warn(msg);
        throw new IOException(msg);
    }
    if (newBlock.getNumBytes() < lastBlock.getNumBytes()) {
        final String msg = "Update " + oldBlock + " (size=" + oldBlock.getNumBytes() + ") to a smaller size block " + newBlock + " (size=" + newBlock.getNumBytes() + ")";
        LOG.warn(msg);
        throw new IOException(msg);
    }
    // Update old block with the new generation stamp and new length
    lastBlock.setNumBytes(newBlock.getNumBytes());
    lastBlock.setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());
    // find the DatanodeDescriptor objects
    final DatanodeStorageInfo[] storages = blockManager.getDatanodeManager().getDatanodeStorageInfos(newNodes, newStorageIDs, "src=%s, oldBlock=%s, newBlock=%s, clientName=%s", src, oldBlock, newBlock, clientName);
    lastBlock.getUnderConstructionFeature().setExpectedLocations(lastBlock, storages, lastBlock.getBlockType());
    FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, logRetryCache);
}
Also used: DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), IOException (java.io.IOException)
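
As context for the checks above, here is a minimal client-side sketch of the RPC that reaches updatePipelineInternal. It assumes a ClientProtocol proxy named namenode plus surviving-node arrays and recovery values that DataStreamer would normally assemble during pipeline recovery; those names are illustrative, not the actual client wiring.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

class UpdatePipelineSketch {
    static void recoverPipeline(ClientProtocol namenode, String clientName,
            ExtendedBlock oldBlock, long recoveryGS, long bytesAcked,
            DatanodeID[] survivingNodes, String[] survivingStorageIDs) throws IOException {
        // Satisfy both checks in updatePipelineInternal: a strictly larger
        // generation stamp, and a length that never shrinks.
        ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
                oldBlock.getBlockId(), bytesAcked, recoveryGS);
        namenode.updatePipeline(clientName, oldBlock, newBlock,
                survivingNodes, survivingStorageIDs);
    }
}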

Example 2 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class FileUnderConstructionFeature, method cleanZeroSizeBlock:

/**
   * When deleting a file in the current fs directory, and the file is contained
   * in a snapshot, we should delete the last block if it's under construction
   * and its size is 0.
   */
void cleanZeroSizeBlock(final INodeFile f, final BlocksMapUpdateInfo collectedBlocks) {
    final BlockInfo[] blocks = f.getBlocks();
    if (blocks != null && blocks.length > 0 && !blocks[blocks.length - 1].isComplete()) {
        BlockInfo lastUC = blocks[blocks.length - 1];
        if (lastUC.getNumBytes() == 0) {
            // this is a 0-sized block; no need to check its UC state here
            collectedBlocks.addDeleteBlock(lastUC);
            f.removeLastBlock(lastUC);
        }
    }
}
Also used: BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
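
To observe the state this method targets, a hedged test-style sketch (the MiniDFSCluster setup and the /f path are assumptions): while a client stream is open, the file's last BlockInfo stays under construction; the zero-byte variant of that state, a block allocated but never written before the file is deleted from a snapshotted directory, is what cleanZeroSizeBlock reclaims.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

public class ZeroSizeLastBlockSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive();
            DistributedFileSystem dfs = cluster.getFileSystem();
            Path p = new Path("/f");
            try (FSDataOutputStream out = dfs.create(p)) {
                out.write(1);
                out.hflush(); // the first block is now allocated on the namenode
                INodeFile f = cluster.getNamesystem().getFSDirectory()
                        .getINode(p.toString()).asFile();
                BlockInfo last = f.getLastBlock();
                // While the stream is open, the last block is under construction.
                System.out.println("complete=" + last.isComplete()
                        + " bytes=" + last.getNumBytes());
            }
        } finally {
            cluster.shutdown();
        }
    }
}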

Example 3 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class FileUnderConstructionFeature, method updateLengthOfLastBlock:

/**
   * Update the length for the last block
   *
   * @param lastBlockLength
   *          The length of the last block reported from client
   * @throws IOException
   */
void updateLengthOfLastBlock(INodeFile f, long lastBlockLength) throws IOException {
    BlockInfo lastBlock = f.getLastBlock();
    assert (lastBlock != null) : "The last block for path " + f.getFullPathName() + " is null when updating its length";
    assert !lastBlock.isComplete() : "The last block for path " + f.getFullPathName() + " is not under-construction when updating its length";
    lastBlock.setNumBytes(lastBlockLength);
}
Also used: BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
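
The usual trigger for updateLengthOfLastBlock is a client hsync carrying SyncFlag.UPDATE_LENGTH, which makes the namenode record the flushed length of the under-construction block. A minimal sketch, assuming a DistributedFileSystem handle; the cast is valid for streams created by DistributedFileSystem:

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

class UpdateLengthSketch {
    static void writeAndPublishLength(DistributedFileSystem dfs, Path p,
            byte[] data) throws IOException {
        try (FSDataOutputStream out = dfs.create(p)) {
            out.write(data);
            // UPDATE_LENGTH asks the namenode to record the flushed length,
            // which is the path that ends in updateLengthOfLastBlock.
            ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        }
    }
}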

Example 4 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class INodeFile, method computeQuotaDeltaForTruncate:

/**
   * Compute the quota usage change for a truncate op.
   * @param newLength the target length after truncation
   * @param bsps the storage policy of the file, used to compute the
   *             storage-type quota deltas (may be null)
   * @param delta accumulator to which the (negative) space deltas are added
   * TODO: properly handle striped blocks (HDFS-7622)
   **/
void computeQuotaDeltaForTruncate(long newLength, BlockStoragePolicy bsps, QuotaCounts delta) {
    final BlockInfo[] blocks = getBlocks();
    if (blocks.length == 0) {
        return;
    }
    long size = 0;
    for (BlockInfo b : blocks) {
        size += b.getNumBytes();
    }
    BlockInfo[] sblocks = null;
    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
    if (sf != null) {
        FileDiff diff = sf.getDiffs().getLast();
        sblocks = diff != null ? diff.getBlocks() : null;
    }
    for (int i = blocks.length - 1; i >= 0 && size > newLength; size -= blocks[i].getNumBytes(), --i) {
        BlockInfo bi = blocks[i];
        long truncatedBytes;
        if (size - newLength < bi.getNumBytes()) {
            // Record a full block as the last block will be copied during
            // recovery
            truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
        } else {
            truncatedBytes = bi.getNumBytes();
        }
        // the block also exists in a snapshot, which still references these
        // bytes, so add the truncated bytes back (they are not reclaimed)
        if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
            truncatedBytes -= bi.getNumBytes();
        }
        delta.addStorageSpace(-truncatedBytes * bi.getReplication());
        if (bsps != null) {
            List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
            for (StorageType t : types) {
                if (t.supportTypeQuota()) {
                    delta.addTypeSpace(t, -truncatedBytes);
                }
            }
        }
    }
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), StorageType (org.apache.hadoop.fs.StorageType), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)
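
To make the loop concrete, a small self-contained example with assumed numbers: a 128 MB preferred block size, replication 3, blocks of 128, 128, and 64 MB (320 MB total), truncated to 200 MB. The 64 MB tail block is dropped outright, while the new last block reclaims nothing because a full preferred-size block stays reserved while truncate recovery copies it:

public class TruncateQuotaExample {
    public static void main(String[] args) {
        final long MB = 1024L * 1024;
        // assumed file layout: two full blocks plus a 64 MB tail
        long[] blocks = { 128 * MB, 128 * MB, 64 * MB };
        long preferred = 128 * MB;
        long newLength = 200 * MB;
        short replication = 3;
        long size = 0;
        for (long b : blocks) {
            size += b; // 320 MB total
        }
        long storageDelta = 0;
        for (int i = blocks.length - 1; i >= 0 && size > newLength;
                size -= blocks[i], --i) {
            long truncated = (size - newLength < blocks[i])
                    // new last block: a full block stays reserved for recovery
                    ? blocks[i] - preferred
                    : blocks[i]; // block is dropped entirely
            storageDelta += -truncated * replication;
        }
        // Prints -201326592, i.e. -(64 MB * 3): only the dropped tail block
        // frees replicated storage space.
        System.out.println(storageDelta);
    }
}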

Example 5 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In class INodeFile, method storagespaceConsumedStriped:

// TODO: support EC with heterogeneous storage
public final QuotaCounts storagespaceConsumedStriped() {
    QuotaCounts counts = new QuotaCounts.Builder().build();
    for (BlockInfo b : blocks) {
        Preconditions.checkState(b.isStriped());
        long blockSize = b.isComplete()
            ? ((BlockInfoStriped) b).spaceConsumed()
            : getPreferredBlockSize() * ((BlockInfoStriped) b).getTotalBlockNum();
        counts.addStorageSpace(blockSize);
    }
    return counts;
}
Also used: BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
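
For the incomplete-block branch above, a hedged arithmetic sketch assuming the built-in RS-6-3 erasure-coding policy (6 data plus 3 parity internal blocks) and a 128 MB preferred block size; until the block group completes, quota conservatively charges every internal block at full preferred size:

public class StripedQuotaExample {
    public static void main(String[] args) {
        final long MB = 1024L * 1024;
        // assumptions: RS-6-3 policy and the default 128 MB block size
        long preferredBlockSize = 128 * MB;
        int dataBlocks = 6;
        int parityBlocks = 3;
        int totalBlockNum = dataBlocks + parityBlocks;
        // Until the block group completes, quota assumes every internal
        // block may grow to the full preferred size: 128 MB * 9 = 1152 MB.
        long charged = preferredBlockSize * totalBlockNum;
        System.out.println(charged / MB + " MB"); // prints 1152 MB
    }
}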

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84 uses
Test (org.junit.Test): 28 uses
Path (org.apache.hadoop.fs.Path): 27 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 22 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16 uses
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13 uses
IOException (java.io.IOException): 11 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11 uses
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11 uses
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5 uses