Uses of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

Class INodeFile, method checkBlockComplete.
/**
 * Check if the i-th block is COMPLETE;
 * when the i-th block is the last block, it may be allowed to be COMMITTED.
 *
 * @return null if the block passes the check;
 *         otherwise, return an error message.
 */
static String checkBlockComplete(BlockInfo[] blocks, int i,
    int numCommittedAllowed, short minReplication) {
  final BlockInfo b = blocks[i];
  final BlockUCState state = b.getBlockUCState();
  if (state == BlockUCState.COMPLETE) {
    return null;
  }
  if (b.isStriped() || i < blocks.length - numCommittedAllowed) {
    return b + " is " + state + " but not COMPLETE";
  }
  if (state != BlockUCState.COMMITTED) {
    return b + " is " + state + " but neither COMPLETE nor COMMITTED";
  }
  final int numExpectedLocations =
      b.getUnderConstructionFeature().getNumExpectedLocations();
  if (numExpectedLocations <= minReplication) {
    return b + " is " + state + " but numExpectedLocations = "
        + numExpectedLocations + " <= minReplication = " + minReplication;
  }
  return null;
}
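For illustration, here is a standalone sketch of the same validation order, using a simplified Block record in place of the real BlockInfo (the State enum, Block record, and check method are illustrative stand-ins, not HDFS types). It shows how a COMMITTED last block is rejected unless numCommittedAllowed covers it, and only then is the replica-count check reached:

import java.util.List;

public class BlockCompleteCheckSketch {
  enum State { COMPLETE, COMMITTED, UNDER_CONSTRUCTION }

  // Simplified stand-in for BlockInfo: a state plus the number of
  // expected replica locations reported while under construction.
  record Block(String name, State state, boolean striped, int expectedLocations) {
    @Override
    public String toString() { return name; }
  }

  static String check(List<Block> blocks, int i,
      int numCommittedAllowed, short minReplication) {
    Block b = blocks.get(i);
    if (b.state() == State.COMPLETE) {
      return null;                                   // COMPLETE always passes
    }
    // Only the trailing numCommittedAllowed blocks may be merely COMMITTED,
    // and striped (erasure-coded) blocks get no such allowance.
    if (b.striped() || i < blocks.size() - numCommittedAllowed) {
      return b + " is " + b.state() + " but not COMPLETE";
    }
    if (b.state() != State.COMMITTED) {
      return b + " is " + b.state() + " but neither COMPLETE nor COMMITTED";
    }
    // A COMMITTED last block must still have strictly more expected
    // replica locations than the minimum replication.
    if (b.expectedLocations() <= minReplication) {
      return b + " has only " + b.expectedLocations() + " expected locations";
    }
    return null;
  }

  public static void main(String[] args) {
    List<Block> blocks = List.of(
        new Block("blk_1", State.COMPLETE, false, 3),
        new Block("blk_2", State.COMMITTED, false, 3));
    System.out.println(check(blocks, 1, 0, (short) 1)); // rejected: no allowance
    System.out.println(check(blocks, 1, 1, (short) 1)); // null: passes
  }
}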
Class INodeFile, method computeFileSize.
/**
 * Compute file size of the current file.
 *
 * @param includesLastUcBlock
 *          If the last block is under construction, should it be included?
 * @param usePreferredBlockSize4LastUcBlock
 *          If the last block is under construction, should we use actual
 *          block size or preferred block size?
 *          Note that usePreferredBlockSize4LastUcBlock is ignored
 *          if includesLastUcBlock == false.
 * @return file size
 */
public final long computeFileSize(boolean includesLastUcBlock,
    boolean usePreferredBlockSize4LastUcBlock) {
  if (blocks.length == 0) {
    return 0;
  }
  final int last = blocks.length - 1;
  // check if the last block is BlockInfoUnderConstruction
  BlockInfo lastBlk = blocks[last];
  long size = lastBlk.getNumBytes();
  if (!lastBlk.isComplete()) {
    if (!includesLastUcBlock) {
      size = 0;
    } else if (usePreferredBlockSize4LastUcBlock) {
      size = isStriped()
          ? getPreferredBlockSize() * ((BlockInfoStriped) lastBlk).getDataBlockNum()
          : getPreferredBlockSize();
    }
  }
  // sum other blocks
  for (int i = 0; i < last; i++) {
    size += blocks[i].getNumBytes();
  }
  return size;
}
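To make the branching concrete, here is a minimal sketch that mirrors the arithmetic with a plain long[] of block sizes instead of BlockInfo objects (this computeFileSize is a hypothetical standalone rework, not the INodeFile method; striped files, whose preferred size is multiplied by the data-block count, are omitted for brevity):

public class FileSizeSketch {
  static long computeFileSize(long[] blockSizes, boolean lastComplete,
      boolean includesLastUcBlock, boolean usePreferredSize, long preferredSize) {
    if (blockSizes.length == 0) {
      return 0;
    }
    int last = blockSizes.length - 1;
    long size = blockSizes[last];
    if (!lastComplete) {
      if (!includesLastUcBlock) {
        size = 0;               // drop the in-flight block entirely
      } else if (usePreferredSize) {
        size = preferredSize;   // charge the full preferred block size
      }
    }
    for (int i = 0; i < last; i++) {
      size += blockSizes[i];    // earlier blocks are always complete
    }
    return size;
  }

  public static void main(String[] args) {
    long[] sizes = {128, 128, 40};  // last block still being written
    System.out.println(computeFileSize(sizes, false, false, false, 128)); // 256
    System.out.println(computeFileSize(sizes, false, true, false, 128));  // 296
    System.out.println(computeFileSize(sizes, false, true, true, 128));   // 384
  }
}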
Class INodeFile, method storagespaceConsumedContiguous.
public final QuotaCounts storagespaceConsumedContiguous(BlockStoragePolicy bsp) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final Iterable<BlockInfo> blocks;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    blocks = Arrays.asList(getBlocks());
  } else {
    // Collect all distinct blocks
    Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
    List<FileDiff> diffs = sf.getDiffs().asList();
    for (FileDiff diff : diffs) {
      BlockInfo[] diffBlocks = diff.getBlocks();
      if (diffBlocks != null) {
        allBlocks.addAll(Arrays.asList(diffBlocks));
      }
    }
    blocks = allBlocks;
  }
  final short replication = getPreferredBlockReplication();
  for (BlockInfo b : blocks) {
    long blockSize = b.isComplete() ? b.getNumBytes() : getPreferredBlockSize();
    counts.addStorageSpace(blockSize * replication);
    if (bsp != null) {
      List<StorageType> types = bsp.chooseStorageTypes(replication);
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          counts.addTypeSpace(t, blockSize);
        }
      }
    }
  }
  return counts;
}
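The per-block quota arithmetic is simple: storage space is charged as blockSize * replication, while type quota is charged blockSize once per chosen storage type that supports type quotas. A hedged sketch with simplified stand-ins (the StorageType enum and the hard-coded chosen list below are illustrative; in the real code the placement comes from BlockStoragePolicy.chooseStorageTypes):

import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class StorageQuotaSketch {
  enum StorageType { DISK, SSD, ARCHIVE }

  public static void main(String[] args) {
    long blockSize = 128L * 1024 * 1024;  // one complete 128 MB block
    short replication = 3;
    // Suppose the policy places one replica on SSD and two on DISK.
    List<StorageType> chosen =
        List.of(StorageType.SSD, StorageType.DISK, StorageType.DISK);

    long storageSpace = blockSize * replication;   // raw bytes, all replicas
    Map<StorageType, Long> typeSpace = new EnumMap<>(StorageType.class);
    for (StorageType t : chosen) {
      typeSpace.merge(t, blockSize, Long::sum);    // one charge per replica
    }
    System.out.println("storage space: " + storageSpace);
    System.out.println("type space:    " + typeSpace);
  }
}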
Class INodeFile, method removeLastBlock.
/**
 * Remove a block from the block list. This block should be
 * the last one on the list.
 */
BlockInfo removeLastBlock(Block oldblock) {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");
  if (blocks.length == 0) {
    return null;
  }
  int size_1 = blocks.length - 1;
  if (!blocks[size_1].equals(oldblock)) {
    return null;
  }
  BlockInfo lastBlock = blocks[size_1];
  // copy to a new list
  BlockInfo[] newlist = new BlockInfo[size_1];
  System.arraycopy(blocks, 0, newlist, 0, size_1);
  setBlocks(newlist);
  lastBlock.delete();
  return lastBlock;
}
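The notable move here is copy-then-swap on the blocks array rather than mutation in place. The same shrink-by-one step in isolation (dropLast is a hypothetical helper, not HDFS code):

import java.util.Arrays;

public class DropLastSketch {
  static <T> T[] dropLast(T[] arr) {
    // Arrays.copyOf truncates when the new length is smaller, equivalent
    // to the new-array-plus-System.arraycopy idiom above.
    return Arrays.copyOf(arr, arr.length - 1);
  }

  public static void main(String[] args) {
    String[] blocks = {"blk_1", "blk_2", "blk_3"};
    System.out.println(Arrays.toString(dropLast(blocks))); // [blk_1, blk_2]
  }
}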
Class INodeFile, method getSnapshotBlocksToRetain.
/** Exclude blocks collected for deletion that belong to a snapshot. */
Set<BlockInfo> getSnapshotBlocksToRetain(int snapshotId) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    return null;
  }
  BlockInfo[] snapshotBlocks = getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if (snapshotBlocks == null) {
    return null;
  }
  Set<BlockInfo> toRetain = new HashSet<>(snapshotBlocks.length);
  Collections.addAll(toRetain, snapshotBlocks);
  return toRetain;
}
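A caller would use the returned set as a filter when actually releasing blocks. A minimal sketch with String stand-ins for BlockInfo (the deletion loop below is hypothetical; the real path lives in the NameNode's delete handling):

import java.util.List;
import java.util.Set;

public class RetainFilterSketch {
  public static void main(String[] args) {
    // Blocks a delete operation collected, as plain strings for brevity.
    List<String> collectedForDeletion = List.of("blk_1", "blk_2", "blk_3");
    // Blocks still referenced by an earlier snapshot must survive.
    Set<String> toRetain = Set.of("blk_2");
    collectedForDeletion.stream()
        .filter(b -> !toRetain.contains(b))   // skip snapshot-owned blocks
        .forEach(b -> System.out.println("delete " + b));
  }
}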