Use of org.apache.hadoop.fs.StorageType in project hadoop by apache: class PBHelperClient, method addStorageTypes.
private static void addStorageTypes(
    HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos,
    QuotaUsage.Builder builder) {
  // Copy each per-StorageType quota entry from the protobuf message into
  // the QuotaUsage builder.
  for (HdfsProtos.StorageTypeQuotaInfoProto info :
      typeQuotaInfos.getTypeQuotaInfoList()) {
    StorageType type = convertStorageType(info.getType());
    builder.typeConsumed(type, info.getConsumed());
    builder.typeQuota(type, info.getQuota());
  }
}
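For context, here is a minimal, self-contained sketch of the builder this helper feeds into. The values are illustrative, and it assumes the Hadoop client classes (org.apache.hadoop.fs.QuotaUsage) are on the classpath:

import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;

public class QuotaUsageSketch {
  public static void main(String[] args) {
    // Record per-StorageType consumption and quota, as addStorageTypes does
    // for each entry decoded from the protobuf message.
    QuotaUsage.Builder builder = new QuotaUsage.Builder()
        .fileAndDirectoryCount(1)
        .quota(-1);
    builder.typeConsumed(StorageType.SSD, 1024L);
    builder.typeQuota(StorageType.SSD, 10L * 1024);
    QuotaUsage usage = builder.build();
    System.out.println(usage.getTypeConsumed(StorageType.SSD));  // 1024
    System.out.println(usage.getTypeQuota(StorageType.SSD));     // 10240
  }
}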
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache: class DFSTopologyNodeImpl, method remove.
@Override
public boolean remove(Node n) {
  if (!isAncestor(n)) {
    throw new IllegalArgumentException(n.getName()
        + ", which is located at " + n.getNetworkLocation()
        + ", is not a descendant of " + getPath(this));
  }
  // In HDFS topology, the leaf node should always be a DatanodeDescriptor.
  if (!(n instanceof DatanodeDescriptor)) {
    throw new IllegalArgumentException("Unexpected node type "
        + n.getClass().getName());
  }
  DatanodeDescriptor dnDescriptor = (DatanodeDescriptor) n;
  if (isParent(n)) {
    // This node is the parent of n; remove n directly.
    if (childrenMap.containsKey(n.getName())) {
      for (int i = 0; i < children.size(); i++) {
        if (children.get(i).getName().equals(n.getName())) {
          children.remove(i);
          childrenMap.remove(n.getName());
          childrenStorageInfo.remove(dnDescriptor.getName());
          for (StorageType st : dnDescriptor.getStorageTypes()) {
            decStorageTypeCount(st);
          }
          numOfLeaves--;
          n.setParent(null);
          return true;
        }
      }
    }
    return false;
  } else {
    // Find the next ancestor node: the parent node.
    String parentName = getNextAncestorName(n);
    DFSTopologyNodeImpl parentNode =
        (DFSTopologyNodeImpl) childrenMap.get(parentName);
    if (parentNode == null) {
      return false;
    }
    // Remove n from the subtree rooted at the parent node.
    boolean isRemoved = parentNode.remove(n);
    if (isRemoved) {
      // Decrement the per-StorageType counts for the parent's subtree,
      // dropping any type whose count reaches zero.
      EnumMap<StorageType, Integer> currentCount =
          childrenStorageInfo.get(parentNode.getName());
      EnumSet<StorageType> toRemove = EnumSet.noneOf(StorageType.class);
      for (StorageType st : dnDescriptor.getStorageTypes()) {
        int newCount = currentCount.get(st) - 1;
        if (newCount == 0) {
          toRemove.add(st);
        }
        currentCount.put(st, newCount);
      }
      for (StorageType st : toRemove) {
        currentCount.remove(st);
      }
      for (StorageType st : dnDescriptor.getStorageTypes()) {
        decStorageTypeCount(st);
      }
      // If the parent node has no children left, remove it as well.
      if (parentNode.getNumOfChildren() == 0) {
        for (int i = 0; i < children.size(); i++) {
          if (children.get(i).getName().equals(parentName)) {
            children.remove(i);
            childrenMap.remove(parentName);
            childrenStorageInfo.remove(parentNode.getName());
            break;
          }
        }
      }
      numOfLeaves--;
    }
    return isRemoved;
  }
}
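The decrement-and-prune bookkeeping above is the subtle part, so here is a self-contained sketch of just that pattern; the class and method names are illustrative, not Hadoop internals. Per-child StorageType counts live in an EnumMap, and an entry is dropped as soon as its count reaches zero:

import java.util.EnumMap;
import org.apache.hadoop.fs.StorageType;

final class StorageTypeCounts {
  private final EnumMap<StorageType, Integer> counts =
      new EnumMap<>(StorageType.class);

  void increment(StorageType st) {
    counts.merge(st, 1, Integer::sum);
  }

  void decrement(StorageType st) {
    Integer cur = counts.get(st);
    if (cur == null) {
      return;  // nothing recorded for this type
    }
    if (cur == 1) {
      counts.remove(st);  // prune: the last datanode with this type is gone
    } else {
      counts.put(st, cur - 1);
    }
  }

  boolean has(StorageType st) {
    return counts.containsKey(st);
  }
}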
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache: class DFSTopologyNodeImpl, method add.
@Override
public boolean add(Node n) {
  if (!isAncestor(n)) {
    throw new IllegalArgumentException(n.getName()
        + ", which is located at " + n.getNetworkLocation()
        + ", is not a descendant of " + getPath(this));
  }
  // In HDFS topology, the leaf node should always be a DatanodeDescriptor.
  if (!(n instanceof DatanodeDescriptor)) {
    throw new IllegalArgumentException("Unexpected node type "
        + n.getClass().getName());
  }
  DatanodeDescriptor dnDescriptor = (DatanodeDescriptor) n;
  if (isParent(n)) {
    // This node is the parent of n; add n directly.
    n.setParent(this);
    n.setLevel(this.level + 1);
    Node prev = childrenMap.put(n.getName(), n);
    if (prev != null) {
      // A child with the same name already existed; replace it in place.
      for (int i = 0; i < children.size(); i++) {
        if (children.get(i).getName().equals(n.getName())) {
          children.set(i, n);
          return false;
        }
      }
    }
    children.add(n);
    numOfLeaves++;
    if (!childrenStorageInfo.containsKey(dnDescriptor.getName())) {
      childrenStorageInfo.put(dnDescriptor.getName(),
          new EnumMap<>(StorageType.class));
    }
    for (StorageType st : dnDescriptor.getStorageTypes()) {
      childrenStorageInfo.get(dnDescriptor.getName()).put(st, 1);
      incStorageTypeCount(st);
    }
    return true;
  } else {
    // Find the next ancestor node.
    String parentName = getNextAncestorName(n);
    InnerNode parentNode = (InnerNode) childrenMap.get(parentName);
    if (parentNode == null) {
      // Create a new InnerNode for the missing ancestor.
      parentNode = createParentNode(parentName);
      children.add(parentNode);
      childrenMap.put(parentNode.getName(), parentNode);
    }
    // Add n to the subtree of the next ancestor node.
    if (parentNode.add(n)) {
      numOfLeaves++;
      if (!childrenStorageInfo.containsKey(parentNode.getName())) {
        childrenStorageInfo.put(parentNode.getName(),
            new EnumMap<>(StorageType.class));
        for (StorageType st : dnDescriptor.getStorageTypes()) {
          childrenStorageInfo.get(parentNode.getName()).put(st, 1);
        }
      } else {
        EnumMap<StorageType, Integer> currentCount =
            childrenStorageInfo.get(parentNode.getName());
        for (StorageType st : dnDescriptor.getStorageTypes()) {
          if (currentCount.containsKey(st)) {
            currentCount.put(st, currentCount.get(st) + 1);
          } else {
            currentCount.put(st, 1);
          }
        }
      }
      for (StorageType st : dnDescriptor.getStorageTypes()) {
        incStorageTypeCount(st);
      }
      return true;
    } else {
      return false;
    }
  }
}
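As a side note, the containsKey/put branches that maintain childrenStorageInfo can be expressed more compactly with computeIfAbsent and merge. A hedged sketch, assuming childrenStorageInfo maps a subtree name to its per-type counts (the class and method names here are illustrative):

import java.util.EnumMap;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;

final class SubtreeStorageInfo {
  private final Map<String, EnumMap<StorageType, Integer>> childrenStorageInfo =
      new HashMap<>();

  // Equivalent to the containsKey/put branches in add(): create the inner
  // map on first sight of the subtree, then bump each type's count.
  void recordDatanode(String subtreeName, Iterable<StorageType> storageTypes) {
    EnumMap<StorageType, Integer> counts = childrenStorageInfo
        .computeIfAbsent(subtreeName, k -> new EnumMap<>(StorageType.class));
    for (StorageType st : storageTypes) {
      counts.merge(st, 1, Integer::sum);  // handles both new and existing keys
    }
  }
}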
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache: class FSDirAppendOp, method computeQuotaDeltaForUCBlock.
/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
    INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfo lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    // Reserve quota for the not-yet-written remainder of the last block,
    // at the block's replication factor.
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = lastBlock.getReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = fsn.getFSDirectory()
        .getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
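To make the arithmetic concrete, a worked example with illustrative numbers: a 128 MB preferred block size, 100 MB already written, and replication 3.

public class UCBlockQuotaExample {
  public static void main(String[] args) {
    // Illustrative values, not taken from a real cluster.
    long preferredBlockSize = 128L * 1024 * 1024;  // 128 MB
    long lastBlockNumBytes = 100L * 1024 * 1024;   // 100 MB already written
    short replication = 3;

    // Reopening the last block for append reserves quota for the bytes
    // that could still be written to it, at full replication.
    long diff = preferredBlockSize - lastBlockNumBytes;  // 28 MB
    long storageSpaceDelta = diff * replication;         // 84 MB of raw quota

    System.out.println("storage space delta: " + storageSpaceDelta);
    // Per-StorageType quota is charged `diff` once per chosen replica type,
    // since chooseStorageTypes(repl) returns one StorageType per replica.
  }
}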
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache: class FSDirConcatOp, method computeQuotaDeltas.
private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
    INodeFile target, INodeFile[] srcList) {
  QuotaCounts deltas = new QuotaCounts.Builder().build();
  final short targetRepl = target.getPreferredBlockReplication();
  for (INodeFile src : srcList) {
    short srcRepl = src.getFileReplication();
    long fileSize = src.computeFileSize();
    if (targetRepl != srcRepl) {
      // The source bytes move from srcRepl copies to targetRepl copies.
      deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));
      BlockStoragePolicy bsp =
          fsd.getBlockStoragePolicySuite().getPolicy(src.getStoragePolicyID());
      if (bsp != null) {
        // Release the source's per-type usage and charge the target's.
        List<StorageType> srcTypeChosen = bsp.chooseStorageTypes(srcRepl);
        for (StorageType t : srcTypeChosen) {
          if (t.supportTypeQuota()) {
            deltas.addTypeSpace(t, -fileSize);
          }
        }
        List<StorageType> targetTypeChosen = bsp.chooseStorageTypes(targetRepl);
        for (StorageType t : targetTypeChosen) {
          if (t.supportTypeQuota()) {
            deltas.addTypeSpace(t, fileSize);
          }
        }
      }
    }
  }
  return deltas;
}
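Again, a worked example with illustrative numbers: concatenating a 64 MB source file replicated 2x into a target replicated 3x.

public class ConcatQuotaExample {
  public static void main(String[] args) {
    // Illustrative values only.
    long fileSize = 64L * 1024 * 1024;  // 64 MB source file
    short srcRepl = 2;
    short targetRepl = 3;

    // After concat the source bytes are stored at the target's replication,
    // so raw usage grows by one extra copy of the source data.
    long storageSpaceDelta = fileSize * (targetRepl - srcRepl);  // +64 MB
    System.out.println("storage space delta: " + storageSpaceDelta);

    // Per-type: each type chosen for the srcRepl replicas is credited
    // -fileSize and each type chosen for the targetRepl replicas is charged
    // +fileSize; with a DISK-only policy that nets +fileSize on DISK.
  }
}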