Search in sources :

Example 76 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class INodeFile, the method getBlocks:

/** @return blocks of the file corresponding to the snapshot. */
/**
 * Returns the block list of this file as it existed in the given snapshot.
 *
 * @param snapshot snapshot id, or {@code CURRENT_STATE_ID} for the live file
 * @return blocks of the file corresponding to the snapshot
 */
public BlockInfo[] getBlocks(int snapshot) {
    // Current state, or no snapshot diffs recorded: just the live block list.
    if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
        return getBlocks();
    }
    // Blocks stored directly in the snapshot's diff (recorded on truncate);
    // note that currently FileDiff can only store contiguous blocks.
    final FileDiff diff = getDiffs().getDiffById(snapshot);
    final BlockInfo[] fromDiff = (diff != null) ? diff.getBlocks() : getBlocks();
    if (fromDiff != null) {
        return fromDiff;
    }
    // Nothing stored for this snapshot: fall forward to the next snapshot
    // that recorded blocks, or use the current file blocks as a last resort.
    final BlockInfo[] later = getDiffs().findLaterSnapshotBlocks(snapshot);
    return (later != null) ? later : getBlocks();
}
Also used : BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)

Example 77 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class INodeFile, the method collectBlocksBeyondMax:

/**
   * Remove full blocks at the end file up to newLength
   * @return sum of sizes of the remained blocks
   */
/**
 * Truncates this file's block list so that only the leading full blocks
 * covering up to {@code max} bytes remain, collecting the removed blocks
 * for deletion.
 *
 * @param max byte length to keep; blocks are retained until their
 *            cumulative size first reaches or exceeds this value
 * @param collectedBlocks sink for blocks scheduled for deletion;
 *                        may be {@code null} to skip collection
 * @param toRetain blocks that must never be scheduled for deletion;
 *                 may be {@code null}
 * @return sum of the sizes of the retained blocks
 */
public long collectBlocksBeyondMax(final long max, final BlocksMapUpdateInfo collectedBlocks, Set<BlockInfo> toRetain) {
    final BlockInfo[] oldBlocks = getBlocks();
    if (oldBlocks == null) {
        return 0;
    }
    // Find the minimum keep-count such that the first `keep` blocks
    // together cover at least `max` bytes.
    int keep = 0;
    long kept = 0;
    while (keep < oldBlocks.length && kept < max) {
        kept += oldBlocks[keep].getNumBytes();
        keep++;
    }
    // Every block fits within max: nothing to remove.
    if (keep >= oldBlocks.length) {
        return kept;
    }
    // Blocks from index `keep` onward lie beyond max; shrink the array.
    truncateBlocksTo(keep);
    // Schedule the chopped-off blocks for deletion, except those the
    // caller asked to retain (e.g. still referenced by a snapshot).
    if (collectedBlocks != null) {
        for (int idx = keep; idx < oldBlocks.length; idx++) {
            final BlockInfo removed = oldBlocks[idx];
            if (toRetain == null || !toRetain.contains(removed)) {
                collectedBlocks.addDeleteBlock(removed);
            }
        }
    }
    return kept;
}
Also used : BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)

Example 78 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class INodeFile, the method concatBlocks:

/**
   * append array of blocks to this.blocks
   */
/**
 * Appends the block lists of the given files to this file's block list,
 * then re-homes every block on this inode and fixes up replication counts.
 *
 * @param inodes source files whose blocks are appended, in order
 * @param bm block manager notified of replication-factor changes
 */
void concatBlocks(INodeFile[] inodes, BlockManager bm) {
    int offset = this.blocks.length;
    int added = 0;
    for (INodeFile src : inodes) {
        // A file cannot mix striped and contiguous block layouts.
        Preconditions.checkState(src.isStriped() == this.isStriped());
        added += src.blocks.length;
    }
    final BlockInfo[] merged = new BlockInfo[offset + added];
    System.arraycopy(this.blocks, 0, merged, 0, offset);
    for (INodeFile src : inodes) {
        System.arraycopy(src.blocks, 0, merged, offset, src.blocks.length);
        offset += src.blocks.length;
    }
    setBlocks(merged);
    for (BlockInfo blk : blocks) {
        // Point the block back at this (surviving) file.
        blk.setBlockCollectionId(getId());
        final short oldRepl = blk.getReplication();
        final short newRepl = getPreferredBlockReplication();
        // Source files may have had a different replication factor.
        if (oldRepl != newRepl) {
            bm.setReplication(oldRepl, newRepl, blk);
        }
    }
}
Also used : BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)

Example 79 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class FSNamesystem, the method addCommittedBlocksToPending:

/**
 * Adds the trailing COMMITTED-but-not-yet-COMPLETE blocks of the given
 * file to pending replication. Only the last {@code numCommittedAllowed}
 * blocks are inspected, since earlier blocks must already be COMPLETE.
 */
void addCommittedBlocksToPending(final INodeFile pendingFile) {
    final BlockInfo[] blocks = pendingFile.getBlocks();
    // Clamp to 0 in case the file has fewer blocks than the allowance.
    final int start = Math.max(0, blocks.length - numCommittedAllowed);
    for (int idx = start; idx < blocks.length; idx++) {
        final BlockInfo blk = blocks[idx];
        if (blk != null && blk.getBlockUCState() == BlockUCState.COMMITTED) {
            // blk is COMMITTED but not yet COMPLETE, add it to pending replication.
            blockManager.addExpectedReplicasToPending(blk);
        }
    }
}
Also used : BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)

Example 80 with BlockInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

In the class FileDiffList, the method combineAndCollectSnapshotBlocks:

/**
   * Copy blocks from the removed snapshot into the previous snapshot
   * up to the file length of the latter.
   * Collect unused blocks of the removed snapshot.
   */
/**
   * Copy blocks from the removed snapshot into the previous snapshot
   * up to the file length of the latter.
   * Collect unused blocks of the removed snapshot.
   *
   * @param reclaimContext carries the sink collecting blocks to delete
   * @param file the file the removed snapshot diff belongs to
   * @param removed the snapshot diff being deleted
   */
void combineAndCollectSnapshotBlocks(INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
    BlockInfo[] removedBlocks = removed.getBlocks();
    if (removedBlocks == null) {
        // The removed diff recorded no block list of its own. If the live
        // file has also been deleted, reclaim all of the file's blocks.
        FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
        assert sf != null : "FileWithSnapshotFeature is null";
        if (sf.isCurrentFileDeleted())
            sf.collectBlocksAndClear(reclaimContext, file);
        return;
    }
    // Locate the snapshot immediately prior to the one being removed.
    int p = getPrior(removed.getSnapshotId(), true);
    FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(p);
    // Copy blocks to the previous snapshot if not set already
    // (NOTE(review): presumably setBlocks is a no-op when the earlier diff
    // already has blocks — verify against FileDiff.setBlocks).
    if (earlierDiff != null) {
        earlierDiff.setBlocks(removedBlocks);
    }
    BlockInfo[] earlierBlocks = (earlierDiff == null ? new BlockInfoContiguous[] {} : earlierDiff.getBlocks());
    // Find later snapshot (or file itself) with blocks
    BlockInfo[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
    laterBlocks = (laterBlocks == null) ? file.getBlocks() : laterBlocks;
    // Skip blocks, which belong to either the earlier or the later lists.
    // Reference equality (==) is deliberate: a shared BlockInfo instance
    // means the same block is still referenced by another snapshot/file.
    int i = 0;
    for (; i < removedBlocks.length; i++) {
        if (i < earlierBlocks.length && removedBlocks[i] == earlierBlocks[i])
            continue;
        if (i < laterBlocks.length && removedBlocks[i] == laterBlocks[i])
            continue;
        break;
    }
    // Check if last block is part of truncate recovery; such a block must
    // not be deleted even though it appears only in the removed diff.
    BlockInfo lastBlock = file.getLastBlock();
    Block dontRemoveBlock = null;
    if (lastBlock != null && lastBlock.getBlockUCState().equals(HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
        dontRemoveBlock = lastBlock.getUnderConstructionFeature().getTruncateBlock();
    }
    // Collect the remaining blocks of the file, ignoring truncate block
    for (; i < removedBlocks.length; i++) {
        if (dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
            reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
        }
    }
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block)

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)84 Test (org.junit.Test)28 Path (org.apache.hadoop.fs.Path)27 Block (org.apache.hadoop.hdfs.protocol.Block)22 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)19 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)16 INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)14 BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager)13 IOException (java.io.IOException)11 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)11 BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous)11 BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped)11 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)10 DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo)7 Configuration (org.apache.hadoop.conf.Configuration)6 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)6 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)6 BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)5 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)5 LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)5