Use of org.apache.hadoop.fs.StorageType in project hadoop by Apache.
The class BlockManager, method chooseExcessRedundancies.

private void chooseExcessRedundancies(
    final Collection<DatanodeStorageInfo> nonExcess,
    BlockInfo storedBlock, short replication,
    DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
  assert namesystem.hasWriteLock();
  BlockCollection bc = getBlockCollection(storedBlock);
  if (storedBlock.isStriped()) {
    // striped blocks are trimmed through their own code path
    chooseExcessRedundancyStriped(bc, nonExcess, storedBlock, delNodeHint);
  } else {
    // for contiguous blocks, ask the storage policy which of the storage
    // types currently holding replicas are excess for the target
    // replication, then trim those first
    final BlockStoragePolicy storagePolicy =
        storagePolicySuite.getPolicy(bc.getStoragePolicyID());
    final List<StorageType> excessTypes = storagePolicy.chooseExcess(
        replication, DatanodeStorageInfo.toStorageTypes(nonExcess));
    chooseExcessRedundancyContiguous(nonExcess, storedBlock, replication,
        addedNode, delNodeHint, excessTypes);
  }
}
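For orientation, BlockStoragePolicy.chooseExcess returns the storage types among the current replicas that the policy does not need for the given replication. A minimal standalone sketch of that set-difference logic, under that assumption; this is an illustration, not the HDFS implementation:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.StorageType;

class ChooseExcessSketch {
  // `expected` stands in for the types the policy would pick for
  // `replication` replicas, i.e. policy.chooseStorageTypes(replication).
  // Each expected slot cancels one matching replica; whatever is left
  // over is excess and eligible for removal.
  static List<StorageType> chooseExcess(List<StorageType> chosen,
      List<StorageType> expected) {
    List<StorageType> excess = new ArrayList<>(chosen);
    for (StorageType t : expected) {
      excess.remove(t);
    }
    return excess;
  }
}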
Use of org.apache.hadoop.fs.StorageType in project hadoop by Apache.
The class BlockPlacementPolicyDefault, method getRequiredStorageTypes.

private EnumMap<StorageType, Integer> getRequiredStorageTypes(
    List<StorageType> types) {
  EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
  for (StorageType type : types) {
    if (!map.containsKey(type)) {
      map.put(type, 1);
    } else {
      int num = map.get(type);
      map.put(type, num + 1);
    }
  }
  return map;
}
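The method simply tallies how many replicas of each storage type are required. On Java 8 and later the same count can be written more compactly with Map#merge; a minimal equivalent sketch (hypothetical helper class, not Hadoop code):

import java.util.EnumMap;
import java.util.List;
import org.apache.hadoop.fs.StorageType;

class StorageTypeTally {
  // Map#merge inserts 1 on first sight of a type and otherwise adds 1
  // to the existing count, collapsing the containsKey/put dance above.
  static EnumMap<StorageType, Integer> count(List<StorageType> types) {
    EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
    for (StorageType type : types) {
      map.merge(type, 1, Integer::sum);
    }
    return map;
  }
}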
Use of org.apache.hadoop.fs.StorageType in project hadoop by Apache.
The class DatanodeStats, method subtract.

synchronized void subtract(final DatanodeDescriptor node) {
  xceiverCount -= node.getXceiverCount();
  if (node.isInService()) {
    capacityUsed -= node.getDfsUsed();
    capacityUsedNonDfs -= node.getNonDfsUsed();
    blockPoolUsed -= node.getBlockPoolUsed();
    nodesInService--;
    nodesInServiceXceiverCount -= node.getXceiverCount();
    capacityTotal -= node.getCapacity();
    capacityRemaining -= node.getRemaining();
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  } else if (node.isDecommissionInProgress() ||
      node.isEnteringMaintenance()) {
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  }
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.subtractStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.subtractNode(storageType, node);
  }
}
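Note the two-phase pattern at the end: per-storage statistics are subtracted for every non-FAILED storage, but the node itself is subtracted from each storage type's aggregate only once, however many volumes of that type it carries; the HashSet is what deduplicates the types. A self-contained mirror of the pattern (all names here hypothetical):

import java.util.EnumSet;
import java.util.Set;

class NodeStatsSketch {
  enum StorageType { DISK, ARCHIVE, SSD }
  enum State { NORMAL, FAILED }

  static class Storage {
    final StorageType type;
    final State state;
    Storage(StorageType type, State state) {
      this.type = type;
      this.state = state;
    }
  }

  // Phase 1 visits every healthy storage (where the real code calls
  // statsMap.subtractStorage); phase 2 then touches each storage type
  // once (where the real code calls statsMap.subtractNode).
  static Set<StorageType> typesToDecrement(Storage[] storages) {
    Set<StorageType> seen = EnumSet.noneOf(StorageType.class);
    for (Storage s : storages) {
      if (s.state != State.FAILED) {
        seen.add(s.type);
      }
    }
    return seen;
  }
}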
Use of org.apache.hadoop.fs.StorageType in project hadoop by Apache.
The class TestMover, method createFileWithFavoredDatanodes.

private void createFileWithFavoredDatanodes(final Configuration conf,
    final MiniDFSCluster cluster, final DistributedFileSystem dfs)
    throws IOException {
  // Add two DISK-based datanodes to the cluster and ensure that
  // blocks written here end up pinned on them.
  StorageType[][] newtypes =
      new StorageType[][] { { StorageType.DISK }, { StorageType.DISK } };
  startAdditionalDNs(conf, 2, newtypes, cluster);
  ArrayList<DataNode> dataNodes = cluster.getDataNodes();
  InetSocketAddress[] favoredNodes = new InetSocketAddress[2];
  int j = 0;
  // collect the transfer addresses of the two newly added datanodes
  // (the loop bound assumes the cluster started with two datanodes)
  for (int i = dataNodes.size() - 1; i >= 2; i--) {
    favoredNodes[j++] = dataNodes.get(i).getXferAddress();
  }
  final String file = "/parent/testMoverFailedRetryWithPinnedBlocks2";
  final FSDataOutputStream out = dfs.create(new Path(file),
      FsPermission.getDefault(), true, DEFAULT_BLOCK_SIZE, (short) 2,
      DEFAULT_BLOCK_SIZE, null, favoredNodes);
  byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
  out.write(fileData);
  out.close();
  // Mock FsDatasetSpi#getPinning so the first block reports as pinned.
  LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
  Assert.assertEquals("Wrong block count", 2,
      locatedBlocks.locatedBlockCount());
  LocatedBlock lb = locatedBlocks.get(0);
  DatanodeInfo datanodeInfo = lb.getLocations()[0];
  for (DataNode dn : cluster.getDataNodes()) {
    if (dn.getDatanodeId().getDatanodeUuid()
        .equals(datanodeInfo.getDatanodeUuid())) {
      LOG.info("Simulate block pinning in datanode {}", datanodeInfo);
      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
      break;
    }
  }
}
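The brittle part of this helper is the hand-rolled index loop, which only fills favoredNodes correctly when the cluster began with exactly two datanodes. A more general sketch of the same idea, collecting the transfer addresses of the last n datanodes as favored nodes (hypothetical helper, using only the APIs already shown above):

import java.net.InetSocketAddress;
import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

class FavoredNodesHelper {
  // Collect the transfer addresses of the `n` most recently added
  // datanodes; the resulting array can be passed to
  // DistributedFileSystem#create as the favoredNodes argument.
  static InetSocketAddress[] lastN(List<DataNode> dataNodes, int n) {
    InetSocketAddress[] favored = new InetSocketAddress[n];
    for (int j = 0; j < n; j++) {
      favored[j] = dataNodes.get(dataNodes.size() - 1 - j).getXferAddress();
    }
    return favored;
  }
}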
Use of org.apache.hadoop.fs.StorageType in project hadoop by Apache.
The class TestMover, method testTwoReplicaSameStorageTypeShouldNotSelect.

@Test(timeout = 300000)
public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(new StorageType[][] {
          { StorageType.DISK, StorageType.ARCHIVE },
          { StorageType.DISK, StorageType.DISK },
          { StorageType.DISK, StorageType.ARCHIVE } })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
    out.close();
    // verify both replicas are on DISK before the movement
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
      Assert.assertEquals(StorageType.DISK, storageType);
    }
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", file });
    Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
    // wait until the namenode reports the new block locations
    waitForLocatedBlockWithArchiveStorageType(dfs, file, 2);
  } finally {
    cluster.shutdown();
  }
}
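waitForLocatedBlockWithArchiveStorageType is a TestMover helper whose body is not shown here; presumably it polls the namenode until the block reports the expected number of ARCHIVE replicas. A hedged sketch of such a poll loop (a guess at the behavior, not the actual helper):

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

class ArchiveWaitSketch {
  // Re-check the first block's storage types every 100 ms until
  // `expected` ARCHIVE replicas show up, or fail after ~60 seconds.
  static void waitForArchiveReplicas(DistributedFileSystem dfs,
      String file, int expected) throws Exception {
    long deadline = System.currentTimeMillis() + 60_000L;
    while (System.currentTimeMillis() < deadline) {
      LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
      int archiveCount = 0;
      for (StorageType type : lb.getStorageTypes()) {
        if (type == StorageType.ARCHIVE) {
          archiveCount++;
        }
      }
      if (archiveCount >= expected) {
        return;
      }
      Thread.sleep(100);
    }
    throw new TimeoutException(
        "block did not reach " + expected + " ARCHIVE replicas");
  }
}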