Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestReadStripedFileWithDecoding, method testInvalidateBlock:
@Test
public void testInvalidateBlock() throws IOException {
  final Path file = new Path("/invalidate");
  final int length = 10;
  final byte[] bytes = StripedFileTestUtil.generateBytes(length);
  DFSTestUtil.writeFile(fs, file, bytes);

  int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
  Assert.assertNotEquals(-1, dnIndex);
  LocatedStripedBlock slb = (LocatedStripedBlock) fs.getClient()
      .getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
      slb, cellSize, dataBlocks, parityBlocks);
  final Block b = blks[0].getBlock().getLocalBlock();

  DataNode dn = cluster.getDataNodes().get(dnIndex);
  // Disable the heartbeat from the DN so that the invalidated block record
  // is kept in the NameNode until the heartbeat expires and the NN marks
  // the DN as dead.
  DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);

  try {
    // Delete the file.
    fs.delete(file, true);
    // Check that the block was added to invalidateBlocks.
    final FSNamesystem fsn = cluster.getNamesystem();
    final BlockManager bm = fsn.getBlockManager();
    DatanodeDescriptor dnd =
        NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
    Assert.assertTrue(bm.containsInvalidateBlock(blks[0].getLocations()[0], b)
        || dnd.containsInvalidateBlock(b));
  } finally {
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
  }
}
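
The helper findFirstDataNode is referenced above but not shown. A minimal sketch of what it could look like, assuming the test's fs (DistributedFileSystem) and cluster (MiniDFSCluster) fields, is below; it matches the first replica's "host:port" location against each datanode's transfer port:

// Hypothetical sketch of the findFirstDataNode helper, not the verbatim
// Hadoop test source. Assumes the enclosing test class provides `fs`
// (DistributedFileSystem) and `cluster` (MiniDFSCluster).
private int findFirstDataNode(Path file, long length) throws IOException {
  // Locations of the first block; getNames() returns "host:port" strings.
  BlockLocation[] locs = fs.getFileBlockLocations(file, 0, length);
  String name = (locs[0].getNames())[0];
  int dnIndex = 0;
  for (DataNode dn : cluster.getDataNodes()) {
    // Match the datanode by its data-transfer port.
    int port = dn.getXferPort();
    if (name.contains(Integer.toString(port))) {
      return dnIndex;
    }
    dnIndex++;
  }
  return -1; // no datanode holds the block
}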
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestDataNodeVolumeFailure, method testUnderReplicationAfterVolFailure:
/**
 * Test that there are under-replicated blocks after volume failures.
 */
@Test
public void testUnderReplicationAfterVolFailure() throws Exception {
  // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
  // volume failures, which is currently not supported on Windows.
  assumeNotWindows();

  // Bring up one more datanode.
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitActive();

  final BlockManager bm = cluster.getNamesystem().getBlockManager();
  Path file1 = new Path("/test1");
  DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 1L);
  DFSTestUtil.waitReplication(fs, file1, (short) 3);

  // Fail the first volume on both datanodes.
  File dn1Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
  File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
  DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

  Path file2 = new Path("/test2");
  DFSTestUtil.createFile(fs, file2, 1024, (short) 3, 1L);
  DFSTestUtil.waitReplication(fs, file2, (short) 3);

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      // Any under-replicated blocks are due to the failed volumes.
      int underReplicatedBlocks = BlockManagerTestUtil
          .checkHeartbeatAndGetUnderReplicatedBlocksCount(
              cluster.getNamesystem(), bm);
      if (underReplicatedBlocks > 0) {
        return true;
      }
      LOG.info("There is no under-replicated block after volume failure.");
      return false;
    }
  }, 500, 60000);
}
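
The poll above relies on BlockManagerTestUtil#checkHeartbeatAndGetUnderReplicatedBlocksCount to force a heartbeat check before reading the counter, so the injected volume failures are detected without waiting for the normal heartbeat interval. A rough sketch of such a helper (an assumption about its behavior, not the verbatim Hadoop source):

// Rough sketch, assuming BlockManager exposes the datanode manager and an
// under-replicated-but-not-missing counter: force an immediate heartbeat
// check under the namesystem write lock, then read the count.
public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
    FSNamesystem namesystem, BlockManager bm) {
  namesystem.writeLock();
  try {
    // Re-evaluate datanode/storage liveness immediately instead of waiting
    // for the periodic heartbeat monitor.
    bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  } finally {
    namesystem.writeUnlock();
  }
  return bm.getUnderReplicatedNotMissingBlocks();
}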