
Example 71 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From class INodeFile, method checkBlockComplete.

/**
   * Check if the i-th block is COMPLETE;
   * when the i-th block is the last block, it may be allowed to be COMMITTED.
   *
   * @return null if the block passes the check;
   *         otherwise, an error message describing the violation.
   */
static String checkBlockComplete(BlockInfo[] blocks, int i, int numCommittedAllowed, short minReplication) {
    final BlockInfo b = blocks[i];
    final BlockUCState state = b.getBlockUCState();
    if (state == BlockUCState.COMPLETE) {
        return null;
    }
    if (b.isStriped() || i < blocks.length - numCommittedAllowed) {
        return b + " is " + state + " but not COMPLETE";
    }
    if (state != BlockUCState.COMMITTED) {
        return b + " is " + state + " but neither COMPLETE nor COMMITTED";
    }
    final int numExpectedLocations = b.getUnderConstructionFeature().getNumExpectedLocations();
    if (numExpectedLocations <= minReplication) {
        return b + " is " + state + " but numExpectedLocations = " + numExpectedLocations + " <= minReplication = " + minReplication;
    }
    return null;
}
Also used: BlockUCState (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
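
To make the index arithmetic concrete, here is a minimal, self-contained sketch; the CommittedWindowDemo class and its values are illustrative, not part of Hadoop. For a five-block file with numCommittedAllowed = 1, only the last index escapes the strict COMPLETE requirement:

public class CommittedWindowDemo {
    public static void main(String[] args) {
        int numBlocks = 5;
        int numCommittedAllowed = 1; // typically only the very last block
        for (int i = 0; i < numBlocks; i++) {
            // Same index test as in checkBlockComplete above.
            boolean mustBeComplete = i < numBlocks - numCommittedAllowed;
            System.out.println("block " + i + (mustBeComplete
                    ? " must be COMPLETE"
                    : " may still be COMMITTED (subject to the replication check)"));
        }
    }
}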

Example 72 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From class INodeFile, method computeFileSize.

/**
   * Compute file size of the current file.
   * 
   * @param includesLastUcBlock
   *          If the last block is under construction, should it be included?
   * @param usePreferredBlockSize4LastUcBlock
   *          If the last block is under construction, should we use actual
   *          block size or preferred block size?
   *          Note that usePreferredBlockSize4LastUcBlock is ignored
   *          if includesLastUcBlock == false.
   * @return file size
   */
public final long computeFileSize(boolean includesLastUcBlock, boolean usePreferredBlockSize4LastUcBlock) {
    if (blocks.length == 0) {
        return 0;
    }
    final int last = blocks.length - 1;
    // check whether the last block is still under construction
    BlockInfo lastBlk = blocks[last];
    long size = lastBlk.getNumBytes();
    if (!lastBlk.isComplete()) {
        if (!includesLastUcBlock) {
            size = 0;
        } else if (usePreferredBlockSize4LastUcBlock) {
            size = isStriped() ? getPreferredBlockSize() * ((BlockInfoStriped) lastBlk).getDataBlockNum() : getPreferredBlockSize();
        }
    }
    // sum the sizes of the remaining (complete) blocks
    for (int i = 0; i < last; i++) {
        size += blocks[i].getNumBytes();
    }
    return size;
}
Also used: BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
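
The interaction of the two flags is easiest to see with numbers. The sketch below (a hypothetical FileSizeDemo class, not Hadoop code) mirrors the computation for a file with three complete 128 MB blocks and an under-construction tail that has received 10 MB so far:

public class FileSizeDemo {
    public static void main(String[] args) {
        long preferred = 128L * 1024 * 1024;  // preferred block size: 128 MB
        long[] complete = {preferred, preferred, preferred};
        long lastUcBytes = 10L * 1024 * 1024; // bytes received so far

        long sum = 0;
        for (long s : complete) {
            sum += s;
        }
        // includesLastUcBlock = false: the tail contributes nothing.
        System.out.println("complete blocks only: " + sum);
        // includesLastUcBlock = true, usePreferredBlockSize4LastUcBlock = false:
        System.out.println("with actual tail:     " + (sum + lastUcBytes));
        // includesLastUcBlock = true, usePreferredBlockSize4LastUcBlock = true:
        System.out.println("with reserved tail:   " + (sum + preferred));
    }
}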

Example 73 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From class INodeFile, method storagespaceConsumedContiguous.

public final QuotaCounts storagespaceConsumedContiguous(BlockStoragePolicy bsp) {
    QuotaCounts counts = new QuotaCounts.Builder().build();
    final Iterable<BlockInfo> blocks;
    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
    if (sf == null) {
        blocks = Arrays.asList(getBlocks());
    } else {
        // Collect all distinct blocks
        Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
        List<FileDiff> diffs = sf.getDiffs().asList();
        for (FileDiff diff : diffs) {
            BlockInfo[] diffBlocks = diff.getBlocks();
            if (diffBlocks != null) {
                allBlocks.addAll(Arrays.asList(diffBlocks));
            }
        }
        blocks = allBlocks;
    }
    final short replication = getPreferredBlockReplication();
    for (BlockInfo b : blocks) {
        long blockSize = b.isComplete() ? b.getNumBytes() : getPreferredBlockSize();
        counts.addStorageSpace(blockSize * replication);
        if (bsp != null) {
            List<StorageType> types = bsp.chooseStorageTypes(replication);
            for (StorageType t : types) {
                if (t.supportTypeQuota()) {
                    counts.addTypeSpace(t, blockSize);
                }
            }
        }
    }
    return counts;
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), StorageType (org.apache.hadoop.fs.StorageType), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff), HashSet (java.util.HashSet)
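
The accounting above charges each block's size once per replica against the storage-space quota, and once per entry of the chosen storage-type list (whose length equals the replication factor) against the type quotas of types that enforce one. A minimal sketch with hypothetical values; the StorageSpaceDemo class and the SSD/DISK split are assumptions for illustration:

public class StorageSpaceDemo {
    public static void main(String[] args) {
        long blockSize = 128L * 1024 * 1024;
        short replication = 3;
        // Storage-space quota is charged once per replica.
        System.out.println("storage space: " + blockSize * replication);
        // Type quotas are charged once per entry in the storage-type list.
        // With a policy placing one replica on SSD and two on DISK:
        System.out.println("SSD type space:  " + blockSize);
        System.out.println("DISK type space: " + 2 * blockSize);
    }
}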

Example 74 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From class INodeFile, method removeLastBlock.

/**
   * Remove a block from the block list. This block should be
   * the last one on the list.
   */
BlockInfo removeLastBlock(Block oldblock) {
    Preconditions.checkState(isUnderConstruction(), "file is no longer under construction");
    if (blocks.length == 0) {
        return null;
    }
    int size_1 = blocks.length - 1;
    if (!blocks[size_1].equals(oldblock)) {
        return null;
    }
    BlockInfo lastBlock = blocks[size_1];
    // copy the remaining blocks into a new, shorter array
    BlockInfo[] newlist = new BlockInfo[size_1];
    System.arraycopy(blocks, 0, newlist, 0, size_1);
    setBlocks(newlist);
    lastBlock.delete();
    return lastBlock;
}
Also used: BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
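
The copy-then-shrink sequence at the end is plain array manipulation. As a side note, the hypothetical ShrinkDemo sketch below shows an equivalent, more compact form using Arrays.copyOf:

import java.util.Arrays;

public class ShrinkDemo {
    public static void main(String[] args) {
        String[] blocks = {"blk_1", "blk_2", "blk_3"};
        // Equivalent to the new-array-plus-System.arraycopy sequence above.
        String[] withoutLast = Arrays.copyOf(blocks, blocks.length - 1);
        System.out.println(Arrays.toString(withoutLast)); // [blk_1, blk_2]
    }
}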

Example 75 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in the Apache Hadoop project.

From class INodeFile, method getSnapshotBlocksToRetain.

/** Exclude blocks collected for deletion that belong to a snapshot. */
Set<BlockInfo> getSnapshotBlocksToRetain(int snapshotId) {
    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
    if (sf == null) {
        return null;
    }
    BlockInfo[] snapshotBlocks = getDiffs().findEarlierSnapshotBlocks(snapshotId);
    if (snapshotBlocks == null) {
        return null;
    }
    Set<BlockInfo> toRetain = new HashSet<>(snapshotBlocks.length);
    Collections.addAll(toRetain, snapshotBlocks);
    return toRetain;
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), HashSet (java.util.HashSet)
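
A hypothetical sketch of how a caller might apply the returned set: blocks still referenced by an earlier snapshot are filtered out of a deletion worklist. The RetainDemo class and the blk_* names are illustrative only:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RetainDemo {
    public static void main(String[] args) {
        Set<String> toRetain = new HashSet<>(Arrays.asList("blk_1", "blk_2"));
        List<String> toDelete = new ArrayList<>(Arrays.asList("blk_1", "blk_2", "blk_3"));
        // Blocks still referenced by a snapshot must survive the deletion.
        toDelete.removeIf(toRetain::contains);
        System.out.println(toDelete); // [blk_3]
    }
}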

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84
Test (org.junit.Test): 28
Path (org.apache.hadoop.fs.Path): 27
Block (org.apache.hadoop.hdfs.protocol.Block): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13
IOException (java.io.IOException): 11
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5