Search in sources :

Example 11 with QuotaCounts

Use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in project hadoop by apache.

From the class DirectoryWithSnapshotFeature, method cleanDirectory:

/**
 * Clean this directory when deleting a snapshot (or the current state of the
 * directory tree), releasing the resources that are no longer referenced.
 *
 * @param reclaimContext carries the quota delta being accumulated and other
 *                       reclamation state (blocks/inodes to remove)
 * @param currentINode   the directory inode owning this feature
 * @param snapshot       id of the snapshot being deleted, or
 *                       Snapshot.CURRENT_STATE_ID to delete the current state
 * @param prior          id of the snapshot immediately before {@code snapshot},
 *                       or NO_SNAPSHOT_ID if there is none; may be refined by
 *                       {@code updatePrior}
 */
public void cleanDirectory(INode.ReclaimContext reclaimContext, final INodeDirectory currentINode, final int snapshot, int prior) {
    Map<INode, INode> priorCreated = null;
    Map<INode, INode> priorDeleted = null;
    // Snapshot of the quota counts before cleaning, so the net change caused
    // by this call can be computed at the end.
    QuotaCounts old = reclaimContext.quotaDelta().getCountsCopy();
    if (snapshot == Snapshot.CURRENT_STATE_ID) {
        // delete the current directory
        currentINode.recordModification(prior);
        // delete everything in created list
        DirectoryDiff lastDiff = diffs.getLast();
        if (lastDiff != null) {
            lastDiff.diff.destroyCreatedList(reclaimContext, currentINode);
        }
        currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior, null);
    } else {
        // update prior
        prior = getDiffs().updatePrior(snapshot, prior);
        // If there is a snapshot diff associated with prior, clone its created
        // and deleted lists now: deleteSnapshotDiff below may merge entries
        // into them, and we need to distinguish the original entries later.
        if (prior != NO_SNAPSHOT_ID) {
            DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
                List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
                priorCreated = cloneDiffList(cList);
                priorDeleted = cloneDiffList(dList);
            }
        }
        getDiffs().deleteSnapshotDiff(reclaimContext, snapshot, prior, currentINode);
        // priorDeleted passed down so children already in prior's deleted list
        // are not cleaned twice.
        currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior, priorDeleted);
        // check priorDiff again since it may be created during the diff deletion
        if (prior != NO_SNOT_SNAPSHOT_ID_PLACEHOLDER) {
            DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
            if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
                // Clean nodes that were in prior's original created list; nodes
                // merged into the list during the deletion above were already
                // handled by the deleteSnapshotDiff/cleanSubtreeRecursively call.
                if (priorCreated != null) {
                    // we only check the node originally in prior's created list
                    for (INode cNode : priorDiff.getChildrenDiff().getList(ListType.CREATED)) {
                        if (priorCreated.containsKey(cNode)) {
                            cNode.cleanSubtree(reclaimContext, snapshot, NO_SNAPSHOT_ID);
                        }
                    }
                }
                // Clean deleted nodes that were NOT in prior's original deleted
                // list (i.e. were merged in while deleting the snapshot diff).
                for (INode dNode : priorDiff.getChildrenDiff().getList(ListType.DELETED)) {
                    if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
                        cleanDeletedINode(reclaimContext, dNode, snapshot, prior);
                    }
                }
            }
        }
    }
    // Net quota change produced by this cleaning: current - old. If this
    // directory has a quota set, record the update against it.
    QuotaCounts current = reclaimContext.quotaDelta().getCountsCopy();
    current.subtract(old);
    if (currentINode.isQuotaSet()) {
        reclaimContext.quotaDelta().addQuotaDirUpdate(currentINode, current);
    }
}
Also used : INode(org.apache.hadoop.hdfs.server.namenode.INode) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts)

Example 12 with QuotaCounts

Use of org.apache.hadoop.hdfs.server.namenode.QuotaCounts in project hadoop by apache.

From the class FileWithSnapshotFeature, method updateQuotaAndCollectBlocks:

/**
 * Update the quota usage after removing a file diff, and collect the blocks
 * that become unreferenced as a result.
 *
 * The quota delta recorded is {@code oldCounts - currentCounts}, where the
 * old counts are taken either from the removed diff's snapshot copy of the
 * file (block-by-block) or, when no snapshot copy exists, from the file's
 * current storage-space consumption.
 *
 * @param reclaimContext carries the quota delta, storage policy suite, and
 *                       the collection of blocks to update/remove
 * @param file           the file whose diff is being removed
 * @param removed        the FileDiff that has been removed
 */
public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
    byte storagePolicyID = file.getStoragePolicyID();
    // Resolve the storage policy only when one is explicitly set; bsp stays
    // null otherwise and type-space quota is skipped below.
    BlockStoragePolicy bsp = null;
    if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
        bsp = reclaimContext.storagePolicySuite().getPolicy(file.getStoragePolicyID());
    }
    QuotaCounts oldCounts;
    if (removed.snapshotINode != null) {
        // The removed diff holds a snapshot copy of the file: rebuild the old
        // counts block by block from the current block list.
        oldCounts = new QuotaCounts.Builder().build();
        BlockInfo[] blocks = file.getBlocks() == null ? new BlockInfo[0] : file.getBlocks();
        for (BlockInfo b : blocks) {
            short replication = b.getReplication();
            // Incomplete blocks are charged at the preferred block size.
            long blockSize = b.isComplete() ? b.getNumBytes() : file.getPreferredBlockSize();
            oldCounts.addStorageSpace(blockSize * replication);
            if (bsp != null) {
                // Charge type-space for each storage type the policy would
                // choose at this replication, if that type enforces quota.
                List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
                for (StorageType t : oldTypeChosen) {
                    if (t.supportTypeQuota()) {
                        oldCounts.addTypeSpace(t, blockSize);
                    }
                }
            }
        }
        // Release the ACL feature referenced by the snapshot copy, if any.
        AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
        if (aclFeature != null) {
            AclStorage.removeAclFeature(aclFeature);
        }
    } else {
        // No snapshot copy in the removed diff: old usage equals the file's
        // current consumption (null policy: storage space only).
        oldCounts = file.storagespaceConsumed(null);
    }
    getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);
    if (file.getBlocks() != null) {
        // After removing the diff, the required replication may have dropped;
        // schedule replication-factor updates for blocks that differ from the
        // max of the file's preferred replication and the remaining diffs'.
        short replInDiff = getMaxBlockRepInDiffs(removed);
        short repl = (short) Math.max(file.getPreferredBlockReplication(), replInDiff);
        for (BlockInfo b : file.getBlocks()) {
            if (repl != b.getReplication()) {
                reclaimContext.collectedBlocks().addUpdateReplicationFactor(b, repl);
            }
        }
    }
    // Record the freed space: oldCounts.subtract(current) mutates oldCounts
    // into (old - current) and that difference is added to the quota delta.
    QuotaCounts current = file.storagespaceConsumed(bsp);
    reclaimContext.quotaDelta().add(oldCounts.subtract(current));
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) QuotaCounts(org.apache.hadoop.hdfs.server.namenode.QuotaCounts) AclFeature(org.apache.hadoop.hdfs.server.namenode.AclFeature)

Aggregations

QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts)12 INode (org.apache.hadoop.hdfs.server.namenode.INode)8 Test (org.junit.Test)7 Path (org.apache.hadoop.fs.Path)6 INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)6 INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath)6 DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff)5 BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)3 INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference)3 WithCount (org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount)3 NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException)2 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)2 FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory)2 INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)2 ChildrenDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff)2 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 StorageType (org.apache.hadoop.fs.StorageType)1 Block (org.apache.hadoop.hdfs.protocol.Block)1 BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous)1