Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class PBHelper, method convertBlockECReconstructionInfo.
public static BlockECReconstructionInfo convertBlockECReconstructionInfo(BlockECReconstructionInfoProto blockEcReconstructionInfoProto) {
  // block being reconstructed
  ExtendedBlockProto blockProto = blockEcReconstructionInfoProto.getBlock();
  ExtendedBlock block = PBHelperClient.convert(blockProto);
  // source and target datanodes for the reconstruction work
  DatanodeInfosProto sourceDnInfosProto = blockEcReconstructionInfoProto.getSourceDnInfos();
  DatanodeInfo[] sourceDnInfos = PBHelperClient.convert(sourceDnInfosProto);
  DatanodeInfosProto targetDnInfosProto = blockEcReconstructionInfoProto.getTargetDnInfos();
  DatanodeInfo[] targetDnInfos = PBHelperClient.convert(targetDnInfosProto);
  // target storage UUIDs and storage types
  HdfsProtos.StorageUuidsProto targetStorageUuidsProto = blockEcReconstructionInfoProto.getTargetStorageUuids();
  String[] targetStorageUuids = convert(targetStorageUuidsProto);
  StorageTypesProto targetStorageTypesProto = blockEcReconstructionInfoProto.getTargetStorageTypes();
  StorageType[] convertStorageTypes = PBHelperClient.convertStorageTypes(
      targetStorageTypesProto.getStorageTypesList(),
      targetStorageTypesProto.getStorageTypesList().size());
  // live block indices within the block group and the erasure coding policy
  byte[] liveBlkIndices = blockEcReconstructionInfoProto.getLiveBlockIndices().toByteArray();
  ErasureCodingPolicy ecPolicy = PBHelperClient.convertErasureCodingPolicy(blockEcReconstructionInfoProto.getEcPolicy());
  return new BlockECReconstructionInfo(block, sourceDnInfos, targetDnInfos,
      targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
}
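The unqualified convert call on the target storage UUIDs above resolves to a small helper in the same class. A minimal sketch of what that helper could look like, assuming StorageUuidsProto carries a repeated string field with a generated getStorageUuidsList() accessor (the actual PBHelper code may differ):

private static String[] convert(HdfsProtos.StorageUuidsProto targetStorageUuidsProto) {
  // copy the repeated storage UUID field into a plain String array
  List<String> uuidList = targetStorageUuidsProto.getStorageUuidsList();
  return uuidList.toArray(new String[uuidList.size()]);
}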
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class Balancer, method init.
/**
* Given a datanode storage set, build a network topology and decide
* over-utilized storages, above average utilized storages,
* below average utilized storages, and underutilized storages.
* The input datanode storage set is shuffled in order to randomize
* the storage matching later on.
*
* @return the number of bytes needed to move in order to balance the cluster.
*/
private long init(List<DatanodeStorageReport> reports) {
  // compute average utilization
  for (DatanodeStorageReport r : reports) {
    policy.accumulateSpaces(r);
  }
  policy.initAvgUtilization();
  // create network topology and classify utilization collections:
  // over-utilized, above-average, below-average and under-utilized.
  long overLoadedBytes = 0L, underLoadedBytes = 0L;
  for (DatanodeStorageReport r : reports) {
    final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
    final boolean isSource = Util.isIncluded(sourceNodes, dn.getDatanodeInfo());
    for (StorageType t : StorageType.getMovableTypes()) {
      final Double utilization = policy.getUtilization(r, t);
      if (utilization == null) {
        // datanode does not have such storage type
        continue;
      }
      final double average = policy.getAvgUtilization(t);
      if (utilization >= average && !isSource) {
        LOG.info(dn + "[" + t + "] has utilization=" + utilization
            + " >= average=" + average
            + " but it is not specified as a source; skipping it.");
        continue;
      }
      final double utilizationDiff = utilization - average;
      final long capacity = getCapacity(r, t);
      final double thresholdDiff = Math.abs(utilizationDiff) - threshold;
      final long maxSize2Move = computeMaxSize2Move(capacity,
          getRemaining(r, t), utilizationDiff, maxSizeToMove);
      final StorageGroup g;
      if (utilizationDiff > 0) {
        final Source s = dn.addSource(t, maxSize2Move, dispatcher);
        if (thresholdDiff <= 0) {
          // within threshold
          aboveAvgUtilized.add(s);
        } else {
          overLoadedBytes += percentage2bytes(thresholdDiff, capacity);
          overUtilized.add(s);
        }
        g = s;
      } else {
        g = dn.addTarget(t, maxSize2Move);
        if (thresholdDiff <= 0) {
          // within threshold
          belowAvgUtilized.add(g);
        } else {
          underLoadedBytes += percentage2bytes(thresholdDiff, capacity);
          underUtilized.add(g);
        }
      }
      dispatcher.getStorageGroupMap().put(g);
    }
  }
  logUtilizationCollections();
  Preconditions.checkState(dispatcher.getStorageGroupMap().size()
      == overUtilized.size() + underUtilized.size()
          + aboveAvgUtilized.size() + belowAvgUtilized.size(),
      "Mismatched number of storage groups");
  // return number of bytes to be moved in order to make the cluster balanced
  return Math.max(overLoadedBytes, underLoadedBytes);
}
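The percentage2bytes helper used above turns a utilization difference, expressed in percent, into a byte count against the storage's capacity. A minimal sketch of the idea, not necessarily the exact Balancer implementation:

private static long percentage2bytes(double percentage, long capacity) {
  // e.g. a 7.5% difference on a 1 TB storage translates to roughly 75 GB to move
  Preconditions.checkArgument(percentage >= 0, "percentage = %s < 0", percentage);
  return (long) (percentage * capacity / 100.0);
}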
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class StoragePolicySummary, method getStoragePolicy.
/**
*
* @param storageTypes - sorted array of storage types
* @return the storage policy that matches the given storage-type combination
*/
private BlockStoragePolicy getStoragePolicy(StorageType[] storageTypes) {
  for (BlockStoragePolicy storagePolicy : storagePolicies) {
    StorageType[] policyStorageTypes = storagePolicy.getStorageTypes();
    // sort a copy of the policy's storage types so it can be compared
    // against the (already sorted) input array
    policyStorageTypes = Arrays.copyOf(policyStorageTypes, policyStorageTypes.length);
    Arrays.sort(policyStorageTypes);
    if (policyStorageTypes.length <= storageTypes.length) {
      // the policy's storage types must be a prefix of the input
      int i = 0;
      for (; i < policyStorageTypes.length; i++) {
        if (policyStorageTypes[i] != storageTypes[i]) {
          break;
        }
      }
      if (i < policyStorageTypes.length) {
        continue;
      }
      // any remaining input entries must repeat the policy's last storage type
      int j = policyStorageTypes.length;
      for (; j < storageTypes.length; j++) {
        if (policyStorageTypes[i - 1] != storageTypes[j]) {
          break;
        }
      }
      if (j == storageTypes.length) {
        return storagePolicy;
      }
    }
  }
  return null;
}
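In other words, a policy matches when its sorted storage types form a prefix of the sorted input and every remaining input entry repeats the policy's last type. A hypothetical illustration; the concrete policy and the sort order shown here are assumptions, not taken from the Hadoop source:

// a WARM-like policy whose sorted storage types are { DISK, ARCHIVE }
StorageType[] policyTypes = { StorageType.DISK, StorageType.ARCHIVE };
// matches: the policy types form a prefix and the trailing entry repeats ARCHIVE
StorageType[] replicas1 = { StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE };
// does not match: the second entry (DISK) differs from the second policy type (ARCHIVE)
StorageType[] replicas2 = { StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE };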
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class FsDatasetImpl, method addVolume.
@Override
public void addVolume(final StorageLocation location, final List<NamespaceInfo> nsInfos) throws IOException {
  // Prepare volume in DataStorage
  final DataStorage.VolumeBuilder builder;
  try {
    builder = dataStorage.prepareVolume(datanode, location, nsInfos);
  } catch (IOException e) {
    volumes.addVolumeFailureInfo(new VolumeFailureInfo(location, Time.now()));
    throw e;
  }
  final Storage.StorageDirectory sd = builder.getStorageDirectory();
  StorageType storageType = location.getStorageType();
  final FsVolumeImpl fsVolume = createFsVolume(sd.getStorageUuid(), sd, location);
  final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
  ArrayList<IOException> exceptions = Lists.newArrayList();
  for (final NamespaceInfo nsInfo : nsInfos) {
    String bpid = nsInfo.getBlockPoolID();
    try {
      fsVolume.addBlockPool(bpid, this.conf, this.timer);
      fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
    } catch (IOException e) {
      LOG.warn("Caught exception when adding " + fsVolume + ". Will throw later.", e);
      exceptions.add(e);
    }
  }
  if (!exceptions.isEmpty()) {
    try {
      sd.unlock();
    } catch (IOException e) {
      exceptions.add(e);
    }
    throw MultipleIOException.createIOException(exceptions);
  }
  final FsVolumeReference ref = fsVolume.obtainReference();
  setupAsyncLazyPersistThread(fsVolume);
  builder.build();
  activateVolume(tempVolumeMap, sd, storageType, ref);
  LOG.info("Added volume - " + location + ", StorageType: " + storageType);
}
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class TestPBHelper, method createLocatedBlock.
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4", AdminStates.NORMAL)
  };
  String[] storageIDs = { "s1", "s2", "s3", "s4" };
  StorageType[] media = {
      StorageType.DISK, StorageType.SSD, StorageType.DISK, StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[] {});
  lb.setBlockToken(new Token<BlockTokenIdentifier>("identifier".getBytes(),
      "password".getBytes(), new Text("kind"), new Text("service")));
  return lb;
}
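The test presumably round-trips blocks like this through the protobuf converters. A minimal sketch of such a check over the storage-type array alone, where the exact PBHelperClient overloads used are assumptions rather than confirmed signatures:

// convert the media array to protobuf storage types and back, then compare
List<StorageTypeProto> protos = PBHelperClient.convertStorageTypes(media);
StorageType[] restored = PBHelperClient.convertStorageTypes(protos, protos.size());
assertArrayEquals(media, restored);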