Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache: the class INodeFile, method storagespaceConsumedContiguous.
public final QuotaCounts storagespaceConsumedContiguous(BlockStoragePolicy bsp) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final Iterable<BlockInfo> blocks;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    blocks = Arrays.asList(getBlocks());
  } else {
    // Collect all distinct blocks
    Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
    List<FileDiff> diffs = sf.getDiffs().asList();
    for (FileDiff diff : diffs) {
      BlockInfo[] diffBlocks = diff.getBlocks();
      if (diffBlocks != null) {
        allBlocks.addAll(Arrays.asList(diffBlocks));
      }
    }
    blocks = allBlocks;
  }
  final short replication = getPreferredBlockReplication();
  for (BlockInfo b : blocks) {
    long blockSize = b.isComplete() ? b.getNumBytes() : getPreferredBlockSize();
    counts.addStorageSpace(blockSize * replication);
    if (bsp != null) {
      List<StorageType> types = bsp.chooseStorageTypes(replication);
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          counts.addTypeSpace(t, blockSize);
        }
      }
    }
  }
  return counts;
}
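The accounting rule is simple: each distinct block (including blocks only referenced from snapshot diffs) is charged once at its effective size, with incomplete blocks counted at the preferred block size, multiplied by the replication factor; one unreplicated copy of the size is also charged against each storage type the policy selects. A minimal standalone sketch of that arithmetic, using a hypothetical SimpleBlock class in place of Hadoop's BlockInfo:

import java.util.List;

class StorageAccountingSketch {
  // Hypothetical stand-in for BlockInfo, for illustration only.
  static class SimpleBlock {
    final long numBytes;
    final boolean complete;
    SimpleBlock(long numBytes, boolean complete) {
      this.numBytes = numBytes;
      this.complete = complete;
    }
  }

  static long storageSpace(List<SimpleBlock> blocks, short replication,
      long preferredBlockSize) {
    long total = 0;
    for (SimpleBlock b : blocks) {
      // As in the method above: a block still being written is charged
      // at the preferred block size rather than its current length.
      long size = b.complete ? b.numBytes : preferredBlockSize;
      total += size * replication;
    }
    return total;
  }

  public static void main(String[] args) {
    // Two complete 128 MB blocks plus one under-construction block,
    // replication 3: each of the three is charged at 128 MB.
    List<SimpleBlock> blocks = List.of(
        new SimpleBlock(128L << 20, true),
        new SimpleBlock(128L << 20, true),
        new SimpleBlock(64L << 20, false));
    System.out.println(storageSpace(blocks, (short) 3, 128L << 20));
  }
}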
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache: the class INodeFile, method recordModification.
public void recordModification(final int latestSnapshotId, boolean withBlocks) {
  if (isInLatestSnapshot(latestSnapshotId)
      && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
    // The file is in a snapshot: create a snapshot feature if it does not
    // already have one.
    FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
    if (sf == null) {
      sf = addSnapshotFeature(null);
    }
    // Record self in the diff list if necessary.
    sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null, withBlocks);
  }
}
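The pattern worth noting here is lazy feature attachment: the FileWithSnapshotFeature is created only the first time a file belonging to the latest snapshot is actually modified, and saveSelf2Snapshot then records the pre-modification state as a diff. A hedged sketch of that attach-then-record shape, with hypothetical types standing in for the feature and its diff list:

import java.util.ArrayDeque;
import java.util.Deque;

class LazyDiffSketch {
  // Hypothetical stand-in for the snapshot feature's diff list.
  static class DiffList {
    final Deque<Integer> snapshotIds = new ArrayDeque<>();
    void saveSelf2Snapshot(int snapshotId) {
      // At most one diff per snapshot; the newest diff sits at the front.
      if (snapshotIds.isEmpty() || snapshotIds.peekFirst() != snapshotId) {
        snapshotIds.addFirst(snapshotId);
      }
    }
  }

  private DiffList diffs; // null until first modification under a snapshot

  void recordModification(int latestSnapshotId) {
    if (diffs == null) {
      diffs = new DiffList(); // lazily attached, like addSnapshotFeature(null)
    }
    diffs.saveSelf2Snapshot(latestSnapshotId);
  }
}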
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache: the class INodeFile, method getPreferredBlockReplication.
public short getPreferredBlockReplication() {
  short max = getFileReplication(CURRENT_STATE_ID);
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    short maxInSnapshot = sf.getMaxBlockRepInDiffs(null);
    if (sf.isCurrentFileDeleted()) {
      return maxInSnapshot;
    }
    max = maxInSnapshot > max ? maxInSnapshot : max;
  }
  if (!isStriped()) {
    return max;
  }
  ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getPolicyByID(getErasureCodingPolicyID());
  Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x"
      + StringUtils.byteToHexString(getErasureCodingPolicyID()));
  return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
}
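For striped (erasure-coded) files the returned value is not a replica count at all but the stripe width: every block group occupies dataUnits + parityUnits storage locations. Under the built-in RS-6-3 policy, for example, the method returns 9. A trivial sketch of that arithmetic:

class StripeWidthSketch {
  // Under erasure coding, the effective "replication" is the number of
  // storage locations a block group occupies: data units plus parity units.
  static short stripeWidth(int numDataUnits, int numParityUnits) {
    return (short) (numDataUnits + numParityUnits);
  }

  public static void main(String[] args) {
    System.out.println(stripeWidth(6, 3)); // RS-6-3: prints 9
  }
}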
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache: the class INodeFile, method getSnapshotBlocksToRetain.
/** Exclude blocks collected for deletion that belong to a snapshot. */
Set<BlockInfo> getSnapshotBlocksToRetain(int snapshotId) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    return null;
  }
  BlockInfo[] snapshotBlocks = getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if (snapshotBlocks == null) {
    return null;
  }
  Set<BlockInfo> toRetain = new HashSet<>(snapshotBlocks.length);
  Collections.addAll(toRetain, snapshotBlocks);
  return toRetain;
}
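The returned set is intended for the deletion path: a block still referenced by an earlier snapshot must survive even though the current file state is going away, and a null result means nothing needs retaining. A hedged sketch (hypothetical helper, not a Hadoop API) of how a caller could filter collected blocks against such a retain set:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

class RetainFilterSketch {
  // Keep only the blocks that no earlier snapshot still references.
  // A null retain set (no snapshot feature, or no earlier snapshot
  // blocks) means everything collected may be deleted.
  static <B> List<B> deletable(List<B> collected, Set<B> toRetain) {
    if (toRetain == null) {
      return collected;
    }
    List<B> out = new ArrayList<>();
    for (B b : collected) {
      if (!toRetain.contains(b)) {
        out.add(b);
      }
    }
    return out;
  }
}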
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache: the class INodeFile, method destroyAndCollectBlocks.
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
  // TODO pass in the storage policy
  reclaimContext.quotaDelta().add(
      computeQuotaUsage(reclaimContext.bsps, false));
  clearFile(reclaimContext);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(
        reclaimContext.collectedBlocks);
    sf.clearDiffs();
  }
  if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
    reclaimContext.removedUCFiles.add(getId());
  }
}
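Note the ordering: the quota usage being released is folded into the ReclaimContext's quota delta before the file and its snapshot diffs are torn down, so the released usage can be applied to ancestor quotas as one aggregate update once the whole subtree is reclaimed. A minimal sketch of that accumulate-then-apply idea, with a hypothetical delta type in place of Hadoop's QuotaDelta:

class QuotaDeltaSketch {
  // Hypothetical accumulator standing in for Hadoop's QuotaDelta:
  // each destroyed inode adds the usage it releases, and the totals
  // are applied to the ancestor directories' quotas in one step.
  private long namespace;
  private long storagespace;

  void add(long nsDelta, long ssDelta) {
    namespace += nsDelta;
    storagespace += ssDelta;
  }

  long namespaceReleased() { return namespace; }
  long storagespaceReleased() { return storagespace; }
}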