Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
Class INodeFile, method getBlocks.
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfo[] getBlocks(int snapshot) {
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  // find blocks stored in snapshot diffs (for truncate)
  FileDiff diff = getDiffs().getDiffById(snapshot);
  // note that currently FileDiff can only store contiguous blocks
  BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
  if (snapshotBlocks != null) {
    return snapshotBlocks;
  }
  // Blocks are not in the current snapshot
  // Find next snapshot with blocks present or return current file blocks
  snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
  return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
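The interesting part of getBlocks(int) is the fallback order: use the blocks recorded in the snapshot's own diff (written by truncate), otherwise the nearest later snapshot that recorded blocks, otherwise the live file's blocks. Below is a minimal, self-contained sketch of that resolution order; DiffSketch and the long[] block lists are hypothetical stand-ins for FileDiff and BlockInfo[], not HDFS code.

// Minimal sketch (not HDFS code) of the snapshot block lookup order above.
class SnapshotBlockLookupSketch {
  static class DiffSketch {
    final int snapshotId;
    final long[] blocks; // null if this diff did not record a block list
    DiffSketch(int snapshotId, long[] blocks) {
      this.snapshotId = snapshotId;
      this.blocks = blocks;
    }
  }

  /** Blocks visible at snapshotId, resolved in the same order as getBlocks(int). */
  static long[] blocksAt(int snapshotId, DiffSketch[] diffsOldestFirst,
                         long[] currentBlocks) {
    long[] laterBlocks = null;
    for (DiffSketch d : diffsOldestFirst) {
      if (d.snapshotId == snapshotId && d.blocks != null) {
        return d.blocks; // the snapshot's own diff recorded blocks (truncate case)
      }
      if (d.snapshotId > snapshotId && d.blocks != null && laterBlocks == null) {
        laterBlocks = d.blocks; // nearest later snapshot that recorded blocks
      }
    }
    return laterBlocks != null ? laterBlocks : currentBlocks;
  }

  public static void main(String[] args) {
    DiffSketch[] diffs = {
        new DiffSketch(1, null),                 // snapshot 1: no blocks recorded
        new DiffSketch(2, new long[] {10, 20})   // snapshot 2: recorded by a truncate
    };
    long[] current = {10, 20, 30};
    System.out.println(blocksAt(1, diffs, current).length); // 2: from snapshot 2
    System.out.println(blocksAt(3, diffs, current).length); // 3: current file blocks
  }
}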
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
Class INodeFile, method collectBlocksBeyondMax.
/**
 * Remove the full blocks at the end of the file that lie beyond newLength.
 * @return sum of sizes of the remaining blocks
 */
public long collectBlocksBeyondMax(final long max,
    final BlocksMapUpdateInfo collectedBlocks, Set<BlockInfo> toRetain) {
  final BlockInfo[] oldBlocks = getBlocks();
  if (oldBlocks == null) {
    return 0;
  }
  // find the minimum n such that the size of the first n blocks >= max
  int n = 0;
  long size = 0;
  for (; n < oldBlocks.length && max > size; n++) {
    size += oldBlocks[n].getNumBytes();
  }
  if (n >= oldBlocks.length) {
    return size;
  }
  // starting from block n, the data is beyond max.
  // resize the array.
  truncateBlocksTo(n);
  // collect the blocks beyond max
  if (collectedBlocks != null) {
    for (; n < oldBlocks.length; n++) {
      final BlockInfo del = oldBlocks[n];
      if (toRetain == null || !toRetain.contains(del)) {
        collectedBlocks.addDeleteBlock(del);
      }
    }
  }
  return size;
}
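The first loop computes the smallest block count whose cumulative size reaches max; every block at or after that index is dropped from the file and collected for deletion. A standalone sketch of that boundary computation, with hypothetical long[] block sizes standing in for BlockInfo objects:

// Standalone sketch (not HDFS code) of the truncation boundary computation above.
class TruncateBoundarySketch {
  /** @return number of leading blocks whose total size first reaches max. */
  static int blocksToKeep(long[] blockSizes, long max) {
    int n = 0;
    long size = 0;
    for (; n < blockSizes.length && max > size; n++) {
      size += blockSizes[n];
    }
    return n;
  }

  public static void main(String[] args) {
    long[] sizes = {128, 128, 128, 64};   // block sizes in MB, for illustration
    // Truncating to 300 MB keeps the first 3 blocks (128 + 128 + 128 >= 300);
    // the 4th block would be collected for deletion.
    System.out.println(blocksToKeep(sizes, 300)); // 3
    System.out.println(blocksToKeep(sizes, 256)); // 2
  }
}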
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
Class INodeFile, method concatBlocks.
/**
 * Append array of blocks to this.blocks
 */
void concatBlocks(INodeFile[] inodes, BlockManager bm) {
  int size = this.blocks.length;
  int totalAddedBlocks = 0;
  for (INodeFile f : inodes) {
    Preconditions.checkState(f.isStriped() == this.isStriped());
    totalAddedBlocks += f.blocks.length;
  }
  BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);
  for (INodeFile in : inodes) {
    System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
    size += in.blocks.length;
  }
  setBlocks(newlist);
  for (BlockInfo b : blocks) {
    b.setBlockCollectionId(getId());
    short oldRepl = b.getReplication();
    short repl = getPreferredBlockReplication();
    if (oldRepl != repl) {
      bm.setReplication(oldRepl, repl, b);
    }
  }
}
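The concatenation itself is plain array copying: size the target array up front from the source lengths, then System.arraycopy each source array into place at a running offset. A self-contained sketch of that pattern, using long[] as an illustrative stand-in for BlockInfo[]:

// Minimal sketch (not HDFS code) of the array-concatenation pattern used by concatBlocks.
class ConcatArraysSketch {
  static long[] concat(long[] base, long[][] toAppend) {
    int total = base.length;
    for (long[] a : toAppend) {
      total += a.length;
    }
    long[] merged = new long[total];
    System.arraycopy(base, 0, merged, 0, base.length);
    int offset = base.length;
    for (long[] a : toAppend) {
      System.arraycopy(a, 0, merged, offset, a.length);
      offset += a.length;
    }
    return merged;
  }

  public static void main(String[] args) {
    long[] merged = concat(new long[] {1, 2}, new long[][] {{3}, {4, 5}});
    System.out.println(merged.length); // 5, in order 1 2 3 4 5
  }
}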
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
Class FSNamesystem, method addCommittedBlocksToPending.
void addCommittedBlocksToPending(final INodeFile pendingFile) {
  final BlockInfo[] blocks = pendingFile.getBlocks();
  int i = blocks.length - numCommittedAllowed;
  if (i < 0) {
    i = 0;
  }
  for (; i < blocks.length; i++) {
    final BlockInfo b = blocks[i];
    if (b != null && b.getBlockUCState() == BlockUCState.COMMITTED) {
      // b is COMMITTED but not yet COMPLETE, add it to pending replication.
      blockManager.addExpectedReplicasToPending(b);
    }
  }
}
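Only the last numCommittedAllowed blocks of a file being closed may still be COMMITTED rather than COMPLETE, so the loop starts at blocks.length - numCommittedAllowed, clamped to zero for short files. A trivial standalone sketch of that window calculation, for illustration only:

// Sketch (not HDFS code) of the tail-window index computed above.
class CommittedTailSketch {
  /** @return index of the first block that needs to be examined. */
  static int firstIndexToCheck(int blockCount, int numCommittedAllowed) {
    int i = blockCount - numCommittedAllowed;
    return Math.max(i, 0); // clamp for files with fewer blocks than the allowance
  }

  public static void main(String[] args) {
    System.out.println(firstIndexToCheck(10, 1)); // 9: only the last block is checked
    System.out.println(firstIndexToCheck(0, 1));  // 0: nothing to skip
  }
}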
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
Class FileDiffList, method combineAndCollectSnapshotBlocks.
/**
 * Copy blocks from the removed snapshot into the previous snapshot
 * up to the file length of the latter.
 * Collect unused blocks of the removed snapshot.
 */
void combineAndCollectSnapshotBlocks(INode.ReclaimContext reclaimContext,
    INodeFile file, FileDiff removed) {
  BlockInfo[] removedBlocks = removed.getBlocks();
  if (removedBlocks == null) {
    FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
    assert sf != null : "FileWithSnapshotFeature is null";
    if (sf.isCurrentFileDeleted()) {
      sf.collectBlocksAndClear(reclaimContext, file);
    }
    return;
  }
  int p = getPrior(removed.getSnapshotId(), true);
  FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(p);
  // Copy blocks to the previous snapshot if not set already
  if (earlierDiff != null) {
    earlierDiff.setBlocks(removedBlocks);
  }
  BlockInfo[] earlierBlocks =
      (earlierDiff == null ? new BlockInfoContiguous[] {} : earlierDiff.getBlocks());
  // Find later snapshot (or file itself) with blocks
  BlockInfo[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
  laterBlocks = (laterBlocks == null) ? file.getBlocks() : laterBlocks;
  // Skip blocks that belong to either the earlier or the later lists
  int i = 0;
  for (; i < removedBlocks.length; i++) {
    if (i < earlierBlocks.length && removedBlocks[i] == earlierBlocks[i]) {
      continue;
    }
    if (i < laterBlocks.length && removedBlocks[i] == laterBlocks[i]) {
      continue;
    }
    break;
  }
  // Check if the last block is part of a truncate recovery
  BlockInfo lastBlock = file.getLastBlock();
  Block dontRemoveBlock = null;
  if (lastBlock != null && lastBlock.getBlockUCState().equals(
      HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
    dontRemoveBlock = lastBlock.getUnderConstructionFeature().getTruncateBlock();
  }
  // Collect the remaining blocks of the file, ignoring the truncate block
  for (; i < removedBlocks.length; i++) {
    if (dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
      reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
    }
  }
}
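The collection rule above is: a block recorded in the removed snapshot is deleted only if it is not shared, by object identity, with the earlier snapshot or with a later state of the file, and is not the block currently involved in truncate recovery. A simplified, self-contained sketch of that rule, using plain Objects as hypothetical stand-ins for BlockInfo:

// Simplified sketch (not HDFS code) of the skip-shared-prefix-then-collect rule above.
class CombineSnapshotBlocksSketch {
  /** Prints which of the removed snapshot's blocks would be collected for deletion. */
  static void collectUnique(Object[] removed, Object[] earlier, Object[] later,
                            Object underRecovery) {
    int i = 0;
    // Skip the shared prefix: blocks still referenced by the earlier snapshot
    // or by a later state of the file (identity comparison, as in the code above).
    for (; i < removed.length; i++) {
      if (i < earlier.length && removed[i] == earlier[i]) continue;
      if (i < later.length && removed[i] == later[i]) continue;
      break;
    }
    // Everything past the shared prefix belonged only to the removed snapshot.
    for (; i < removed.length; i++) {
      if (underRecovery == null || !removed[i].equals(underRecovery)) {
        System.out.println("delete " + removed[i]);
      }
    }
  }

  public static void main(String[] args) {
    Object b1 = "blk_1", b2 = "blk_2", b3 = "blk_3";
    // The live file still uses b1 and b2, so only b3 is unique to the removed snapshot.
    collectUnique(new Object[] {b1, b2, b3}, new Object[] {},
        new Object[] {b1, b2}, null); // prints "delete blk_3"
  }
}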