use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
the class FsDatasetImpl method shutdownBlockPool.
@Override
public void shutdownBlockPool(String bpid) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    LOG.info("Removing block pool " + bpid);
    Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume =
        getBlockReports(bpid);
    volumeMap.cleanUpBlockPool(bpid);
    volumes.removeBlockPool(bpid, blocksPerVolume);
  }
}
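Each key of the map returned by getBlockReports above is a DatanodeStorage, so a caller can see which storage held which blocks before the pool is removed. A minimal sketch of inspecting those keys; the dataset variable and the logging are illustrative and not part of FsDatasetImpl:

// Illustrative sketch only; "dataset" stands for an FsDatasetImpl instance.
Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume = dataset.getBlockReports(bpid);
for (Map.Entry<DatanodeStorage, BlockListAsLongs> entry : blocksPerVolume.entrySet()) {
  DatanodeStorage storage = entry.getKey();
  LOG.info("Storage " + storage.getStorageID()
      + " (state=" + storage.getState()
      + ", type=" + storage.getStorageType() + ") holds "
      + entry.getValue().getNumberOfBlocks() + " blocks of pool " + bpid);
}
dataset.shutdownBlockPool(bpid);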
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
the class FsDatasetImpl method getBlockReports.
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
  Map<DatanodeStorage, BlockListAsLongs> blockReportsMap =
      new HashMap<DatanodeStorage, BlockListAsLongs>();
  Map<String, BlockListAsLongs.Builder> builders =
      new HashMap<String, BlockListAsLongs.Builder>();
  List<FsVolumeImpl> curVolumes = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    curVolumes = volumes.getVolumes();
    // One builder per storage, keyed by storage ID.
    for (FsVolumeSpi v : curVolumes) {
      builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
    }
    Set<String> missingVolumesReported = new HashSet<>();
    for (ReplicaInfo b : volumeMap.replicas(bpid)) {
      String volStorageID = b.getVolume().getStorageID();
      if (!builders.containsKey(volStorageID)) {
        if (!missingVolumesReported.contains(volStorageID)) {
          LOG.warn("Storage volume: " + volStorageID + " missing for the"
              + " replica block: " + b + ". Probably being removed!");
          missingVolumesReported.add(volStorageID);
        }
        continue;
      }
      switch (b.getState()) {
        case FINALIZED:
        case RBW:
        case RWR:
          builders.get(b.getVolume().getStorageID()).add(b);
          break;
        case RUR:
          // A replica under recovery is reported as its original replica.
          ReplicaInfo orig = b.getOriginalReplica();
          builders.get(b.getVolume().getStorageID()).add(orig);
          break;
        case TEMPORARY:
          break;
        default:
          assert false : "Illegal ReplicaInfo state.";
      }
    }
  }
  for (FsVolumeImpl v : curVolumes) {
    blockReportsMap.put(v.toDatanodeStorage(), builders.get(v.getStorageID()).build());
  }
  return blockReportsMap;
}
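On the DataNode, a map like this is typically turned into per-storage reports for the NameNode, one entry per DatanodeStorage. A sketch of that conversion, modeled on the block-report path; the dataset and bpid variables are assumed and not shown here:

// Sketch: pair each DatanodeStorage with its encoded block list.
Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists = dataset.getBlockReports(bpid);
StorageBlockReport[] reports = new StorageBlockReport[perVolumeBlockLists.size()];
int i = 0;
for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
  reports[i++] = new StorageBlockReport(kvPair.getKey(), kvPair.getValue());
}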
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
the class TestAddStripedBlocks method testCheckStripedReplicaCorrupt.
@Test
public void testCheckStripedReplicaCorrupt() throws Exception {
  final int numBlocks = 4;
  final int numStripes = 4;
  final Path filePath = new Path("/corrupt");
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final BlockManager bm = ns.getBlockManager();
  DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, numStripes, false);
  INodeFile fileNode = ns.getFSDirectory().getINode(filePath.toString()).asFile();
  Assert.assertTrue(fileNode.isStriped());
  BlockInfo stored = fileNode.getBlocks()[0];
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
  // Now send a block report with correct size
  DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
  final Block reported = new Block(stored);
  reported.setNumBytes(numStripes * cellSize);
  StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
  // Now send a block report with wrong size
  reported.setBlockId(stored.getBlockId() + 1);
  reported.setNumBytes(numStripes * cellSize - 1);
  reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(1).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  // Now send a parity block report with correct size
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes(numStripes * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  // Now send a parity block report with wrong size
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes(numStripes * cellSize + 1);
  reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  // the total number of corrupted block info is still 1
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  // 2 internal blocks corrupted
  Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());
  // Now change the size of stored block, and test verifying the last
  // block size
  stored.setNumBytes(stored.getNumBytes() + 10);
  reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
  reported.setNumBytes(numStripes * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(4).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
  // Now send a parity block report with correct size based on adjusted
  // size of stored block
  /** Now stored block has {@link numStripes} full stripes + a cell + 10 */
  stored.setNumBytes(stored.getNumBytes() + cellSize);
  reported.setBlockId(stored.getBlockId());
  reported.setNumBytes((numStripes + 1) * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
  reported.setBlockId(stored.getBlockId() + 1);
  reported.setNumBytes(numStripes * cellSize + 10);
  reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes((numStripes + 1) * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(
      reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
}
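The test only needs a storage identity for its synthetic reports, so it builds DatanodeStorage from a bare UUID string; when the state and storage type matter, the three-argument constructor is used instead, as in the next example. A quick side-by-side sketch, with placeholder literal values:

// One-argument form, as in the test above: only the storage ID is supplied.
DatanodeStorage byIdOnly = new DatanodeStorage(UUID.randomUUID().toString());
// Full form, as in createDatanodeStorageInfo below: ID, state, and storage type are explicit.
DatanodeStorage explicit = new DatanodeStorage("test-storage-id",
    DatanodeStorage.State.NORMAL, StorageType.SSD);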
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
the class DFSTestUtil method createDatanodeStorageInfo.
public static DatanodeStorageInfo createDatanodeStorageInfo(String storageID,
    String ip, String rack, String hostname, StorageType type, String upgradeDomain) {
  final DatanodeStorage storage = new DatanodeStorage(storageID,
      DatanodeStorage.State.NORMAL, type);
  final DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(
      ip, rack, storage, hostname);
  if (upgradeDomain != null) {
    dn.setUpgradeDomain(upgradeDomain);
  }
  return BlockManagerTestUtil.newDatanodeStorageInfo(dn, storage);
}
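A usage sketch of this helper; only the signature comes from the method above, and the argument values are made up for illustration:

// Hypothetical values: storage ID, IP, rack, hostname, storage type, upgrade domain.
DatanodeStorageInfo info = DFSTestUtil.createDatanodeStorageInfo(
    "s-0001", "10.0.0.1", "/d1/r1", "host1", StorageType.DISK, "ud-1");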
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
the class TestBlockStoragePolicy method testChooseSsdOverDisk.
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE },
      new StorageType[] {}, new StorageType[] {});
  final String[] racks = { "/d1/r1", "/d1/r1", "/d1/r1" };
  final String[] hosts = { "host1", "host2", "host3" };
  final StorageType[] disks = { StorageType.DISK, StorageType.DISK, StorageType.DISK };
  final DatanodeStorageInfo[] diskStorages =
      DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  // Attach an additional SSD storage to every datanode.
  for (int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL, StorageType.SSD));
  }
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }
  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false, new HashSet<Node>(), 0,
      policy, null);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
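The policy lists storage types {SSD, DISK, ARCHIVE} with empty fallback arrays, which presumably explains the assertions above: for replication 3 the policy asks for one replica of each listed type, and since the test cluster has no ARCHIVE storage and no fallback is configured, only the SSD and DISK targets can be chosen. A small sketch of checking that reading directly; the expected list is an assumption, not taken from the test:

// Sketch: what storage types the policy asks for at replication 3.
List<StorageType> requested = policy.chooseStorageTypes((short) 3);
// Expected under the assumption above: [SSD, DISK, ARCHIVE]
System.out.println("requested storage types: " + requested);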