Search in sources :

Example 6 with FileWithSnapshotFeature

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache.

In class INodeFile, the method storagespaceConsumedContiguous:

/**
 * Computes the storage space consumed by a contiguous (non-striped) file,
 * including blocks that are referenced only from snapshot diffs.
 *
 * @param bsp storage policy used to attribute per-storage-type quota, or
 *            {@code null} to skip per-type accounting
 * @return accumulated storage-space and storage-type quota counts
 */
public final QuotaCounts storagespaceConsumedContiguous(BlockStoragePolicy bsp) {
    QuotaCounts usage = new QuotaCounts.Builder().build();
    FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
    final Iterable<BlockInfo> allBlocks;
    if (snapshotFeature != null) {
        // Union of the file's current blocks and every block recorded in a
        // snapshot diff, de-duplicated so each block is charged exactly once.
        Set<BlockInfo> distinct = new HashSet<>(Arrays.asList(getBlocks()));
        for (FileDiff diff : snapshotFeature.getDiffs().asList()) {
            BlockInfo[] recorded = diff.getBlocks();
            if (recorded != null) {
                distinct.addAll(Arrays.asList(recorded));
            }
        }
        allBlocks = distinct;
    } else {
        allBlocks = Arrays.asList(getBlocks());
    }
    final short replication = getPreferredBlockReplication();
    for (BlockInfo block : allBlocks) {
        // An incomplete block is charged at the preferred block size.
        long size = block.isComplete() ? block.getNumBytes() : getPreferredBlockSize();
        usage.addStorageSpace(size * replication);
        if (bsp != null) {
            for (StorageType type : bsp.chooseStorageTypes(replication)) {
                if (type.supportTypeQuota()) {
                    usage.addTypeSpace(type, size);
                }
            }
        }
    }
    return usage;
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), StorageType (org.apache.hadoop.fs.StorageType), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff), HashSet (java.util.HashSet)

Example 7 with FileWithSnapshotFeature

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache.

In class INodeFile, the method recordModification:

/**
 * Records a modification of this file under the latest snapshot by saving
 * the current state into the snapshot diff list when necessary.
 *
 * @param latestSnapshotId id of the latest snapshot that covers this file
 * @param withBlocks whether to also capture the current block list in the diff
 */
public void recordModification(final int latestSnapshotId, boolean withBlocks) {
    if (!isInLatestSnapshot(latestSnapshotId) || shouldRecordInSrcSnapshot(latestSnapshotId)) {
        return;
    }
    // The file is in a snapshot: lazily attach the snapshot feature.
    FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
    if (snapshotFeature == null) {
        snapshotFeature = addSnapshotFeature(null);
    }
    // Save this file's current state into the diff list for the snapshot.
    snapshotFeature.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null, withBlocks);
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature)

Example 8 with FileWithSnapshotFeature

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache.

In class INodeFile, the method getPreferredBlockReplication:

/**
 * Returns the preferred block replication for this file: the maximum of the
 * current replication and any replication recorded in snapshot diffs. For a
 * striped file, the erasure-coding policy's total unit count is returned
 * instead.
 */
public short getPreferredBlockReplication() {
    short max = getFileReplication(CURRENT_STATE_ID);
    FileWithSnapshotFeature snapshotFeature = this.getFileWithSnapshotFeature();
    if (snapshotFeature != null) {
        short maxInSnapshot = snapshotFeature.getMaxBlockRepInDiffs(null);
        if (snapshotFeature.isCurrentFileDeleted()) {
            // Only snapshots still reference this file; their replication wins.
            return maxInSnapshot;
        }
        max = (short) Math.max(max, maxInSnapshot);
    }
    if (isStriped()) {
        // Striped files ignore replication: size is data units + parity units.
        ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(getErasureCodingPolicyID());
        Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x" + StringUtils.byteToHexString(getErasureCodingPolicyID()));
        return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
    }
    return max;
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)

Example 9 with FileWithSnapshotFeature

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache.

In class INodeFile, the method getSnapshotBlocksToRetain:

/**
 * Excludes blocks collected for deletion that still belong to a snapshot.
 *
 * @param snapshotId the snapshot boundary to check against
 * @return the set of blocks that must be retained, or {@code null} when the
 *         file has no snapshot feature or no earlier snapshot blocks exist
 */
Set<BlockInfo> getSnapshotBlocksToRetain(int snapshotId) {
    FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
    if (snapshotFeature == null) {
        return null;
    }
    // Blocks referenced by a snapshot earlier than snapshotId must survive.
    BlockInfo[] earlierBlocks = getDiffs().findEarlierSnapshotBlocks(snapshotId);
    if (earlierBlocks == null) {
        return null;
    }
    Set<BlockInfo> retained = new HashSet<>(earlierBlocks.length);
    for (BlockInfo block : earlierBlocks) {
        retained.add(block);
    }
    return retained;
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), HashSet (java.util.HashSet)

Example 10 with FileWithSnapshotFeature

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature in project hadoop by apache.

In class INodeFile, the method destroyAndCollectBlocks:

/**
 * Destroys this file: charges its quota usage to the reclaim context,
 * clears the file, releases snapshot diffs (collecting their blocks for
 * deletion), and records under-construction files for later cleanup.
 *
 * @param reclaimContext accumulator for quota deltas, collected blocks,
 *                       and removed under-construction files
 */
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
    // TODO pass in the storage policy
    reclaimContext.quotaDelta().add(computeQuotaUsage(reclaimContext.bsps, false));
    clearFile(reclaimContext);
    FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
    if (snapshotFeature != null) {
        // Collect blocks held only by snapshot diffs, then drop the diffs.
        snapshotFeature.getDiffs().destroyAndCollectSnapshotBlocks(reclaimContext.collectedBlocks);
        snapshotFeature.clearDiffs();
    }
    if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
        reclaimContext.removedUCFiles.add(getId());
    }
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature)

Aggregations

FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature)10 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)4 StorageType (org.apache.hadoop.fs.StorageType)3 FileDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)3 HashSet (java.util.HashSet)2 FileDiffList (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList)2 BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)1 ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)1