Search in sources :

Example 16 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class PBHelperClient, method addStorageTypes.

/**
 * Copies each per-storage-type (consumed, quota) pair from the protobuf
 * message into the given {@link QuotaUsage.Builder}.
 *
 * @param typeQuotaInfos protobuf container of per-type quota entries
 * @param builder        target builder to populate
 */
private static void addStorageTypes(HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos, QuotaUsage.Builder builder) {
    for (HdfsProtos.StorageTypeQuotaInfoProto typeInfo : typeQuotaInfos.getTypeQuotaInfoList()) {
        final StorageType storageType = convertStorageType(typeInfo.getType());
        builder.typeConsumed(storageType, typeInfo.getConsumed());
        builder.typeQuota(storageType, typeInfo.getQuota());
    }
}
Also used : HdfsProtos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos) StorageType(org.apache.hadoop.fs.StorageType)

Example 17 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class DFSTopologyNodeImpl, method remove.

@Override
public boolean remove(Node n) {
    // Removes leaf node n (a datanode) from the subtree rooted at this node,
    // keeping the per-child storage-type bookkeeping in sync.
    if (!isAncestor(n)) {
        throw new IllegalArgumentException(n.getName() + ", which is located at " + n.getNetworkLocation() + ", is not a descendant of " + getPath(this));
    }
    // In HDFS topology, the leaf node should always be DatanodeDescriptor
    if (!(n instanceof DatanodeDescriptor)) {
        throw new IllegalArgumentException("Unexpected node type " + n.getClass().getName());
    }
    DatanodeDescriptor dnDescriptor = (DatanodeDescriptor) n;
    if (isParent(n)) {
        // this node is the parent of n; remove n directly
        if (childrenMap.containsKey(n.getName())) {
            for (int i = 0; i < children.size(); i++) {
                if (children.get(i).getName().equals(n.getName())) {
                    // Removal during iteration is safe: we return immediately.
                    children.remove(i);
                    childrenMap.remove(n.getName());
                    // Drop this datanode's per-type record and decrement the
                    // aggregate storage-type counters for this subtree.
                    childrenStorageInfo.remove(dnDescriptor.getName());
                    for (StorageType st : dnDescriptor.getStorageTypes()) {
                        decStorageTypeCount(st);
                    }
                    numOfLeaves--;
                    n.setParent(null);
                    return true;
                }
            }
        }
        // n was not actually registered as a child of this node.
        return false;
    } else {
        // find the next ancestor node: the parent node
        String parentName = getNextAncestorName(n);
        DFSTopologyNodeImpl parentNode = (DFSTopologyNodeImpl) childrenMap.get(parentName);
        if (parentNode == null) {
            return false;
        }
        // remove n from the parent node
        boolean isRemoved = parentNode.remove(n);
        if (isRemoved) {
            // Update the counts recorded for the intermediate parent node:
            // decrement each storage type the datanode carried and drop
            // entries whose count reaches zero.
            EnumMap<StorageType, Integer> currentCount = childrenStorageInfo.get(parentNode.getName());
            EnumSet<StorageType> toRemove = EnumSet.noneOf(StorageType.class);
            for (StorageType st : dnDescriptor.getStorageTypes()) {
                int newCount = currentCount.get(st) - 1;
                if (newCount == 0) {
                    toRemove.add(st);
                }
                currentCount.put(st, newCount);
            }
            // Removal is deferred to avoid mutating currentCount while
            // iterating over the datanode's storage types above.
            for (StorageType st : toRemove) {
                currentCount.remove(st);
            }
            // Also decrement this node's aggregate per-type counters.
            for (StorageType st : dnDescriptor.getStorageTypes()) {
                decStorageTypeCount(st);
            }
            // If the parent node has no children left, remove it too.
            if (parentNode.getNumOfChildren() == 0) {
                for (int i = 0; i < children.size(); i++) {
                    if (children.get(i).getName().equals(parentName)) {
                        children.remove(i);
                        childrenMap.remove(parentName);
                        childrenStorageInfo.remove(parentNode.getName());
                        break;
                    }
                }
            }
            numOfLeaves--;
        }
        return isRemoved;
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) StorageType(org.apache.hadoop.fs.StorageType)

Example 18 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class DFSTopologyNodeImpl, method add.

@Override
public boolean add(Node n) {
    // Inserts leaf node n (must be a DatanodeDescriptor) into this subtree,
    // keeping childrenStorageInfo and the aggregate storage-type counters in
    // sync. Returns false when a child with the same name already existed and
    // was replaced in place, or when the descendant subtree rejected the add.
    if (!isAncestor(n)) {
        throw new IllegalArgumentException(n.getName() + ", which is located at " + n.getNetworkLocation() + ", is not a descendant of " + getPath(this));
    }
    // In HDFS topology, the leaf node should always be DatanodeDescriptor
    if (!(n instanceof DatanodeDescriptor)) {
        throw new IllegalArgumentException("Unexpected node type " + n.getClass().getName());
    }
    DatanodeDescriptor dnDescriptor = (DatanodeDescriptor) n;
    if (isParent(n)) {
        // this node is the parent of n; add n directly
        n.setParent(this);
        n.setLevel(this.level + 1);
        Node prev = childrenMap.put(n.getName(), n);
        if (prev != null) {
            // A node with the same name already existed: swap it out in the
            // children list and report false (no new leaf was added).
            for (int i = 0; i < children.size(); i++) {
                if (children.get(i).getName().equals(n.getName())) {
                    children.set(i, n);
                    return false;
                }
            }
        }
        children.add(n);
        numOfLeaves++;
        // Record each of the datanode's storage types with count 1 and bump
        // this subtree's aggregate counters.
        if (!childrenStorageInfo.containsKey(dnDescriptor.getName())) {
            childrenStorageInfo.put(dnDescriptor.getName(), new EnumMap<>(StorageType.class));
        }
        for (StorageType st : dnDescriptor.getStorageTypes()) {
            childrenStorageInfo.get(dnDescriptor.getName()).put(st, 1);
            incStorageTypeCount(st);
        }
        return true;
    } else {
        // find the next ancestor node
        String parentName = getNextAncestorName(n);
        InnerNode parentNode = (InnerNode) childrenMap.get(parentName);
        if (parentNode == null) {
            // create a new InnerNode
            parentNode = createParentNode(parentName);
            children.add(parentNode);
            childrenMap.put(parentNode.getName(), parentNode);
        }
        // add n to the subtree of the next ancestor node
        if (parentNode.add(n)) {
            numOfLeaves++;
            // Merge the datanode's storage types into the counts tracked for
            // the intermediate parent node.
            if (!childrenStorageInfo.containsKey(parentNode.getName())) {
                // First leaf under this parent: start each type at 1.
                childrenStorageInfo.put(parentNode.getName(), new EnumMap<>(StorageType.class));
                for (StorageType st : dnDescriptor.getStorageTypes()) {
                    childrenStorageInfo.get(parentNode.getName()).put(st, 1);
                }
            } else {
                // Parent already tracked: increment existing types, seed new
                // ones at 1.
                EnumMap<StorageType, Integer> currentCount = childrenStorageInfo.get(parentNode.getName());
                for (StorageType st : dnDescriptor.getStorageTypes()) {
                    if (currentCount.containsKey(st)) {
                        currentCount.put(st, currentCount.get(st) + 1);
                    } else {
                        currentCount.put(st, 1);
                    }
                }
            }
            // Bump this node's aggregate per-type counters as well.
            for (StorageType st : dnDescriptor.getStorageTypes()) {
                incStorageTypeCount(st);
            }
            return true;
        } else {
            return false;
        }
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) StorageType(org.apache.hadoop.fs.StorageType) InnerNode(org.apache.hadoop.net.InnerNode) Node(org.apache.hadoop.net.Node) InnerNode(org.apache.hadoop.net.InnerNode)

Example 19 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class FSDirAppendOp, method computeQuotaDeltaForUCBlock.

/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn, INodeFile file) {
    final QuotaCounts delta = new QuotaCounts.Builder().build();
    final BlockInfo lastBlock = file.getLastBlock();
    if (lastBlock == null) {
        // No blocks in the file: conversion changes no quota.
        return delta;
    }
    // Charge for the worst case: the UC block growing from its current size
    // up to the file's preferred block size, at the block's replication.
    final long sizeDelta = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short replication = lastBlock.getReplication();
    delta.addStorageSpace(sizeDelta * replication);
    final BlockStoragePolicy policy = fsn.getFSDirectory().getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
    for (StorageType type : policy.chooseStorageTypes(replication)) {
        // Only storage types that support quotas contribute to type space.
        if (type.supportTypeQuota()) {
            delta.addTypeSpace(type, sizeDelta);
        }
    }
    return delta;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)

Example 20 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class FSDirConcatOp, method computeQuotaDeltas.

/**
 * Computes the quota change from concatenating the given source files into
 * {@code target}: for each source whose replication differs from the
 * target's, the storage space and per-storage-type space are re-charged at
 * the target's replication.
 *
 * @param fsd     directory used to resolve storage policies
 * @param target  file the sources are concatenated into
 * @param srcList source files being concatenated
 * @return aggregate quota deltas for the concat operation
 */
private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
    final QuotaCounts deltas = new QuotaCounts.Builder().build();
    final short targetRepl = target.getPreferredBlockReplication();
    for (INodeFile src : srcList) {
        final short srcRepl = src.getFileReplication();
        final long fileSize = src.computeFileSize();
        if (targetRepl == srcRepl) {
            // Same replication: this source is quota-neutral.
            continue;
        }
        deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));
        final BlockStoragePolicy bsp = fsd.getBlockStoragePolicySuite().getPolicy(src.getStoragePolicyID());
        if (bsp == null) {
            continue;
        }
        // Release the type space charged under the source's replication...
        for (StorageType type : bsp.chooseStorageTypes(srcRepl)) {
            if (type.supportTypeQuota()) {
                deltas.addTypeSpace(type, -fileSize);
            }
        }
        // ...and re-charge it under the target's replication.
        for (StorageType type : bsp.chooseStorageTypes(targetRepl)) {
            if (type.supportTypeQuota()) {
                deltas.addTypeSpace(type, fileSize);
            }
        }
    }
    return deltas;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)

Aggregations

StorageType (org.apache.hadoop.fs.StorageType)94 Test (org.junit.Test)31 Path (org.apache.hadoop.fs.Path)27 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)25 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)19 Configuration (org.apache.hadoop.conf.Configuration)17 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)16 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)14 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)13 BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)12 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)11 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)11 IOException (java.io.IOException)10 ArrayList (java.util.ArrayList)9 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)6 ByteString (com.google.protobuf.ByteString)5 InterruptedIOException (java.io.InterruptedIOException)5 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)5 File (java.io.File)4 InetSocketAddress (java.net.InetSocketAddress)4