Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class DatanodeStats, method add.
synchronized void add(final DatanodeDescriptor node) {
  xceiverCount += node.getXceiverCount();
  if (node.isInService()) {
    capacityUsed += node.getDfsUsed();
    capacityUsedNonDfs += node.getNonDfsUsed();
    blockPoolUsed += node.getBlockPoolUsed();
    nodesInService++;
    nodesInServiceXceiverCount += node.getXceiverCount();
    capacityTotal += node.getCapacity();
    capacityRemaining += node.getRemaining();
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  } else if (node.isDecommissionInProgress() ||
      node.isEnteringMaintenance()) {
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  }
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.addStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.addNode(storageType, node);
  }
}
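The Set matters here: a datanode often mounts several volumes of the same StorageType, but statsMap.addNode should count the node once per type, not once per volume. A minimal, self-contained sketch of that de-duplication idea, using a stand-in enum rather than the real Hadoop classes:

import java.util.EnumSet;
import java.util.Set;

// Sketch only (stand-in types, not Hadoop code): a node may report
// several volumes of the same StorageType, but per-type node counts
// should increment once per node.
public class StorageTypeDedupSketch {
  // Stand-in for org.apache.hadoop.fs.StorageType.
  enum StorageType { DISK, SSD, ARCHIVE, RAM_DISK }

  public static void main(String[] args) {
    // Hypothetical volume report: two DISK volumes and one SSD volume.
    StorageType[] volumeTypes = {
        StorageType.DISK, StorageType.DISK, StorageType.SSD };

    // The set collapses duplicates, mirroring the HashSet in add().
    Set<StorageType> distinct = EnumSet.noneOf(StorageType.class);
    for (StorageType t : volumeTypes) {
      distinct.add(t);
    }
    // Prints [DISK, SSD]: the node is counted once per storage type.
    System.out.println(distinct);
  }
}

Running it prints [DISK, SSD], even though the node reports two DISK volumes.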
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class StorageLocation, method parse.
/**
 * Attempt to parse a storage URI with an optional storage class prefix.
 * The storage class component of the URI is case-insensitive.
 *
 * @param rawLocation Location string of the format [type]uri, where [type]
 *          is optional.
 * @return a StorageLocation holding the parsed storage type and URI.
 * @throws IOException if the raw location cannot be parsed into a URI.
 */
public static StorageLocation parse(String rawLocation)
    throws IOException, SecurityException {
  Matcher matcher = regex.matcher(rawLocation);
  StorageType storageType = StorageType.DEFAULT;
  String location = rawLocation;
  if (matcher.matches()) {
    String classString = matcher.group(1);
    location = matcher.group(2).trim();
    if (!classString.isEmpty()) {
      storageType =
          StorageType.valueOf(StringUtils.toUpperCase(classString));
    }
  }
  // "/a/b" and "/a/b/" are represented in a consistent manner
  return new StorageLocation(storageType, new Path(location).toUri());
}
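A short usage sketch, assuming hadoop-hdfs is on the classpath: the bracketed prefix selects the storage type and is case-insensitive, while an unprefixed path falls back to StorageType.DEFAULT (DISK).

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

// Usage sketch only; assumes the hadoop-hdfs server jars are available.
public class ParseExample {
  public static void main(String[] args) throws IOException {
    // Explicit storage class; the prefix is case-insensitive.
    StorageLocation ssd = StorageLocation.parse("[ssd]file:///mnt/ssd1/dn");
    // No prefix: the type falls back to StorageType.DEFAULT.
    StorageLocation plain = StorageLocation.parse("/data/dn");
    System.out.println(ssd.getStorageType());    // SSD
    System.out.println(plain.getStorageType());  // DISK
  }
}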
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class AvailableSpaceVolumeChoosingPolicy, method chooseVolume.
@Override
public V chooseVolume(List<V> volumes, long replicaSize) throws IOException {
  if (volumes.size() < 1) {
    throw new DiskOutOfSpaceException("No more available volumes");
  }
  // All volumes in the list share the same storage type, so the type of
  // the first volume determines the lock index for the whole list.
  StorageType storageType = volumes.get(0).getStorageType();
  int index = storageType != null
      ? storageType.ordinal() : StorageType.DEFAULT.ordinal();
  synchronized (syncLocks[index]) {
    return doChooseVolume(volumes, replicaSize);
  }
}
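Indexing syncLocks by StorageType.ordinal() implies one lock per storage type, so volume choices for different storage types never contend with each other. A minimal sketch of that striping pattern, with a stand-in enum instead of the Hadoop types:

// Sketch only (stand-in types, not Hadoop code) of the lock striping
// that chooseVolume() relies on: one monitor per StorageType ordinal.
public class LockStripingSketch {
  // Stand-in for org.apache.hadoop.fs.StorageType.
  enum StorageType { DISK, SSD, ARCHIVE, RAM_DISK }

  private final Object[] syncLocks;

  LockStripingSketch() {
    // One monitor object per storage type.
    syncLocks = new Object[StorageType.values().length];
    for (int i = 0; i < syncLocks.length; i++) {
      syncLocks[i] = new Object();
    }
  }

  void choose(StorageType type) {
    // Null type falls back to the default, as in the snippet above.
    int index = type != null ? type.ordinal() : StorageType.DISK.ordinal();
    synchronized (syncLocks[index]) {
      // ... per-storage-type critical section ...
    }
  }
}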
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class RoundRobinVolumeChoosingPolicy, method chooseVolume.
@Override
public V chooseVolume(final List<V> volumes, long blockSize)
    throws IOException {
  if (volumes.size() < 1) {
    throw new DiskOutOfSpaceException("No more available volumes");
  }
  // All volumes in the list share the same storage type, so the type of
  // the first volume determines the lock index for the whole list.
  StorageType storageType = volumes.get(0).getStorageType();
  int index = storageType != null
      ? storageType.ordinal() : StorageType.DEFAULT.ordinal();
  synchronized (syncLocks[index]) {
    return chooseVolume(index, volumes, blockSize);
  }
}
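The two policies share the same locking shell; the round-robin behavior itself lives in the chooseVolume(index, volumes, blockSize) overload. A hypothetical, self-contained sketch of that scan, keeping one cursor per storage-type index and returning the first volume with room for the block (the real policy throws DiskOutOfSpaceException when the scan wraps without success):

import java.util.List;

// Sketch only (stand-in types, not Hadoop code) of a round-robin scan:
// advance a per-type cursor, return the first volume that fits the block.
public class RoundRobinSketch {
  // Stand-in for the volume abstraction; only free space is needed here.
  interface Volume { long getAvailable(); }

  private final int[] cursors;  // one cursor per StorageType ordinal

  RoundRobinSketch(int numStorageTypes) {
    cursors = new int[numStorageTypes];
  }

  // volumes is assumed non-empty, as guarded in chooseVolume() above.
  Volume choose(int typeIndex, List<? extends Volume> volumes,
      long blockSize) {
    int start = cursors[typeIndex] % volumes.size();
    int i = start;
    do {
      Volume v = volumes.get(i);
      i = (i + 1) % volumes.size();
      if (v.getAvailable() > blockSize) {
        cursors[typeIndex] = i;  // resume after the chosen volume next time
        return v;
      }
    } while (i != start);
    return null;  // no volume has room; the real policy throws here
  }
}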
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class DataStreamer, method nextBlockOutputStream.
/**
 * Open a DataStreamer to a DataNode so that it can be written to.
 * This happens when a file is created and each time a new block is
 * allocated. Must get the block ID and the IDs of the destination
 * datanodes from the namenode.
 * Returns the LocatedBlock for the new block, which carries the list
 * of target datanodes.
 */
protected LocatedBlock nextBlockOutputStream() throws IOException {
  LocatedBlock lb;
  DatanodeInfo[] nodes;
  StorageType[] storageTypes;
  int count = dfsClient.getConf().getNumBlockWriteRetry();
  boolean success;
  final ExtendedBlock oldBlock = block.getCurrentBlock();
  do {
    errorState.resetInternalError();
    lastException.clear();
    DatanodeInfo[] excluded = getExcludedNodes();
    lb = locateFollowingBlock(
        excluded.length > 0 ? excluded : null, oldBlock);
    block.setCurrentBlock(lb.getBlock());
    block.setNumBytes(0);
    bytesSent = 0;
    accessToken = lb.getBlockToken();
    nodes = lb.getLocations();
    storageTypes = lb.getStorageTypes();
    // Connect to the first DataNode in the list.
    success = createBlockOutputStream(nodes, storageTypes, 0L, false);
    if (!success) {
      LOG.warn("Abandoning " + block);
      dfsClient.namenode.abandonBlock(block.getCurrentBlock(),
          stat.getFileId(), src, dfsClient.clientName);
      block.setCurrentBlock(null);
      final DatanodeInfo badNode = nodes[errorState.getBadNodeIndex()];
      LOG.warn("Excluding datanode " + badNode);
      excludedNodes.put(badNode, badNode);
    }
  } while (!success && --count >= 0);
  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return lb;
}
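The control flow is a bounded retry loop: allocate a block, try to open the pipeline to the first datanode, and on failure abandon the block and exclude the node that failed. A self-contained sketch of that shape, with hypothetical helpers standing in for the namenode and pipeline calls:

import java.io.IOException;
import java.util.Random;

// Sketch only (hypothetical helpers, not Hadoop code) of the bounded
// retry loop in nextBlockOutputStream(): each failed pipeline setup
// abandons the work done so far and retries until the budget is spent.
public class RetryLoopSketch {
  private static final Random RANDOM = new Random();

  // Stand-in for createBlockOutputStream(); fails about 1 in 4 tries.
  static boolean tryOpenPipeline() {
    return RANDOM.nextInt(4) > 0;
  }

  public static void main(String[] args) throws IOException {
    int retriesLeft = 3;  // plays the role of getNumBlockWriteRetry()
    boolean success;
    do {
      // Allocate the block from the namenode (elided), then connect:
      success = tryOpenPipeline();
      if (!success) {
        // Abandon the block and exclude the bad node (elided).
        System.out.println("Pipeline setup failed; retrying");
      }
    } while (!success && --retriesLeft >= 0);
    if (!success) {
      throw new IOException("Unable to create new block.");
    }
    System.out.println("Pipeline established");
  }
}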