Search in sources:

Example 1 with FileDiff

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff in the Apache Hadoop project.

From the class INodeFile, method computeQuotaDeltaForTruncate.

/**
   * Compute the quota usage change for a truncate op.
   *
   * Walks the block list from the tail and accumulates negative storage-space
   * and storage-type deltas for every block (or portion of a block) lying
   * beyond {@code newLength}.
   *
   * @param newLength the length for truncation
   * @param bsps storage policy used to resolve per-type deltas; may be null,
   *             in which case only raw storage space is updated
   * @param delta accumulator that receives the (negative) quota changes
   * TODO: properly handle striped blocks (HDFS-7622)
   **/
void computeQuotaDeltaForTruncate(long newLength, BlockStoragePolicy bsps, QuotaCounts delta) {
    final BlockInfo[] blocks = getBlocks();
    if (blocks.length == 0) {
        // Empty file: truncation changes nothing.
        return;
    }
    // Total on-disk length of the file; shrinks as the loop below walks off
    // the tail (see the for-update clause).
    long size = 0;
    for (BlockInfo b : blocks) {
        size += b.getNumBytes();
    }
    // Blocks recorded by the most recent snapshot diff, if any; used below to
    // avoid reclaiming space for blocks a snapshot still references.
    BlockInfo[] sblocks = null;
    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
    if (sf != null) {
        FileDiff diff = sf.getDiffs().getLast();
        sblocks = diff != null ? diff.getBlocks() : null;
    }
    // Walk backwards over exactly those blocks that extend past newLength.
    for (int i = blocks.length - 1; i >= 0 && size > newLength; size -= blocks[i].getNumBytes(), --i) {
        BlockInfo bi = blocks[i];
        long truncatedBytes;
        if (size - newLength < bi.getNumBytes()) {
            // Partially truncated block: record a full block, as the last
            // block will be copied during recovery — so the preferred block
            // size is charged against the bytes reclaimed here.
            truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
        } else {
            // Block lies entirely beyond newLength; all its bytes go away.
            truncatedBytes = bi.getNumBytes();
        }
        // If the same block is also held by the latest snapshot, its bytes
        // remain referenced and this truncate reclaims nothing for it.
        if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
            truncatedBytes -= bi.getNumBytes();
        }
        delta.addStorageSpace(-truncatedBytes * bi.getReplication());
        if (bsps != null) {
            // Mirror the space delta into each quota-tracked storage type.
            List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
            for (StorageType t : types) {
                if (t.supportTypeQuota()) {
                    delta.addTypeSpace(t, -truncatedBytes);
                }
            }
        }
    }
}
Also used: FileWithSnapshotFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature) StorageType(org.apache.hadoop.fs.StorageType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)

Example 2 with FileDiff

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff in the Apache Hadoop project.

From the class TestTruncateQuotaUpdate, method addSnapshotFeature.

/**
 * Attaches a FileWithSnapshotFeature to {@code file} whose single diff
 * reports the given {@code blocks} as its snapshot block list.
 */
private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
    // Stub a diff that hands back the requested snapshot blocks.
    FileDiff stubDiff = mock(FileDiff.class);
    when(stubDiff.getBlocks()).thenReturn(blocks);

    // Inject the stub into a real FileDiffList through its private "diffs" field.
    FileDiffList list = new FileDiffList();
    @SuppressWarnings("unchecked")
    ArrayList<FileDiff> internalDiffs =
        (ArrayList<FileDiff>) Whitebox.getInternalState(list, "diffs");
    internalDiffs.add(stubDiff);

    file.addFeature(new FileWithSnapshotFeature(list));
}
Also used: FileWithSnapshotFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature) FileDiffList(org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList) FileDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)

Example 3 with FileDiff

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff in the Apache Hadoop project.

From the class INodeFile, method storagespaceConsumedContiguous.

/**
 * Computes the storage space and storage-type space consumed by this file's
 * contiguous blocks, counting each distinct block exactly once across the
 * current state and every snapshot diff.
 *
 * @param bsp storage policy used to resolve per-type charges; may be null,
 *            in which case only raw storage space is accumulated
 * @return the accumulated quota usage
 */
public final QuotaCounts storagespaceConsumedContiguous(BlockStoragePolicy bsp) {
    final QuotaCounts usage = new QuotaCounts.Builder().build();

    // Decide which blocks to charge. Without snapshots the live block list
    // suffices; with snapshots we must de-duplicate across all diffs so a
    // block shared by several snapshots is charged only once.
    final Iterable<BlockInfo> chargeable;
    final FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
    if (snapshotFeature == null) {
        chargeable = Arrays.asList(getBlocks());
    } else {
        final Set<BlockInfo> distinct = new HashSet<>(Arrays.asList(getBlocks()));
        for (FileDiff d : snapshotFeature.getDiffs().asList()) {
            final BlockInfo[] diffBlocks = d.getBlocks();
            if (diffBlocks != null) {
                distinct.addAll(Arrays.asList(diffBlocks));
            }
        }
        chargeable = distinct;
    }

    final short replication = getPreferredBlockReplication();
    for (BlockInfo block : chargeable) {
        // An incomplete (under-construction) block is charged at the
        // preferred block size rather than its current length.
        final long bytes = block.isComplete() ? block.getNumBytes() : getPreferredBlockSize();
        usage.addStorageSpace(bytes * replication);
        if (bsp == null) {
            continue;
        }
        for (StorageType type : bsp.chooseStorageTypes(replication)) {
            if (type.supportTypeQuota()) {
                usage.addTypeSpace(type, bytes);
            }
        }
    }
    return usage;
}
Also used: FileWithSnapshotFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature) StorageType(org.apache.hadoop.fs.StorageType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff) HashSet(java.util.HashSet)

Example 4 with FileDiff

Use of org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff in the Apache Hadoop project.

From the class INodeFile, method getBlocks.

/** @return blocks of the file corresponding to the given snapshot. */
public BlockInfo[] getBlocks(int snapshot) {
    // Current state, or no snapshot diffs at all: the live block list applies.
    if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
        return getBlocks();
    }
    // Look up blocks stored in the snapshot's diff (for truncate); note that
    // currently a FileDiff can only store contiguous blocks.
    final FileDiff diff = getDiffs().getDiffById(snapshot);
    BlockInfo[] result = (diff == null) ? getBlocks() : diff.getBlocks();
    if (result == null) {
        // Blocks are not recorded in this snapshot: fall forward to the next
        // snapshot that has them, or to the current file blocks.
        result = getDiffs().findLaterSnapshotBlocks(snapshot);
        if (result == null) {
            result = getBlocks();
        }
    }
    return result;
}
Also used: BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)

Aggregations

FileDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)4 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)3 FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature)3 StorageType (org.apache.hadoop.fs.StorageType)2 HashSet (java.util.HashSet)1 FileDiffList (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList)1