Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestReadStripedFileWithDecoding, method testInvalidateBlock.
@Test
public void testInvalidateBlock() throws IOException {
  final Path file = new Path("/invalidate");
  final int length = 10;
  final byte[] bytes = StripedFileTestUtil.generateBytes(length);
  DFSTestUtil.writeFile(fs, file, bytes);
  int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
  Assert.assertNotEquals(-1, dnIndex);
  LocatedStripedBlock slb = (LocatedStripedBlock) fs.getClient()
      .getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
      slb, cellSize, dataBlocks, parityBlocks);
  final Block b = blks[0].getBlock().getLocalBlock();
  DataNode dn = cluster.getDataNodes().get(dnIndex);
  // Disable the heartbeat from the DN so that the invalidated block record
  // is kept in the NameNode until the heartbeat expires and the NN marks
  // the DN as dead.
  DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
  try {
    // Delete the file.
    fs.delete(file, true);
    // Check that the block has been added to invalidateBlocks.
    final FSNamesystem fsn = cluster.getNamesystem();
    final BlockManager bm = fsn.getBlockManager();
    DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
    Assert.assertTrue(bm.containsInvalidateBlock(blks[0].getLocations()[0], b)
        || dnd.containsInvalidateBlock(b));
  } finally {
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
  }
}
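The findFirstDataNode helper is called above but not included in this excerpt. A plausible reconstruction from the call site (the body is an assumption, not copied from the Hadoop source): it maps the first replica's "host:port" location name back to an index into cluster.getDataNodes() by matching the transfer port, returning -1 if nothing matches.

// Hypothetical sketch of the helper assumed by the test above.
private int findFirstDataNode(Path file, long length) throws IOException {
  BlockLocation[] locs = fs.getFileBlockLocations(file, 0, length);
  String name = locs[0].getNames()[0]; // "host:xferPort" of the first replica
  int dnIndex = 0;
  for (DataNode dn : cluster.getDataNodes()) {
    if (name.contains(Integer.toString(dn.getXferPort()))) {
      return dnIndex;
    }
    dnIndex++;
  }
  return -1;
}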
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestSafeMode, method testRbwBlocksNotConsideredUnderReplicated.
/**
 * Test that, when under-replicated blocks are processed at the end of
 * safe-mode, blocks currently under construction are not considered
 * under-replicated or missing. Regression test for HDFS-2822.
 */
@Test
public void testRbwBlocksNotConsideredUnderReplicated() throws IOException {
  List<FSDataOutputStream> stms = Lists.newArrayList();
  try {
    // Create some junk blocks so that the NN doesn't just immediately
    // exit safemode on restart.
    DFSTestUtil.createFile(fs, new Path("/junk-blocks"),
        BLOCK_SIZE * 4, (short) 1, 1L);
    // Open several streams and hflush a byte into each, so the last block
    // of each file stays under construction (RBW) across the restart.
    for (int i = 0; i < 10; i++) {
      FSDataOutputStream stm = fs.create(
          new Path("/append-" + i), true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
      stms.add(stm);
      stm.write(1);
      stm.hflush();
    }
    cluster.restartNameNode();
    FSNamesystem ns = cluster.getNameNode(0).getNamesystem();
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    assertEquals(0, ns.getPendingReplicationBlocks());
    assertEquals(0, ns.getCorruptReplicaBlocks());
    assertEquals(0, ns.getMissingBlocksCount());
  } finally {
    for (FSDataOutputStream stm : stms) {
      IOUtils.closeStream(stm);
    }
    cluster.shutdown();
  }
}
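The BlockManagerTestUtil.updateState call is what makes the assertions trustworthy: FSNamesystem's block-count getters report cached metric values, and updateState forces the BlockManager to recompute them first. To make the test's premise explicit, a small hedged addition (not in the original test; isInSafeMode() is a public FSNamesystem accessor) could confirm the restarted NameNode has already left safe mode before the counters are read:

// Not in the original test: confirm the NN has exited safe mode, so the
// end-of-safemode replication scan fixed by HDFS-2822 has already run.
assertFalse(ns.isInSafeMode());
BlockManagerTestUtil.updateState(ns.getBlockManager());
assertEquals(0, ns.getMissingBlocksCount());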
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestBlockManager, method testBlockManagerMachinesArray.
@Test
public void testBlockManagerMachinesArray() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  cluster.waitActive();
  BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  FileSystem fs = cluster.getFileSystem();
  final Path filePath = new Path("/tmp.txt");
  final long fileLen = 1L;
  DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
  ArrayList<DataNode> datanodes = cluster.getDataNodes();
  assertEquals(4, datanodes.size());
  FSNamesystem ns = cluster.getNamesystem();
  // Get the block.
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(0, 0);
  File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
  assertTrue("Data directory does not exist", dataDir.exists());
  BlockInfo blockInfo = blockManager.blocksMap.getBlocks().iterator().next();
  ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(),
      blockInfo.getNumBytes(), blockInfo.getGenerationStamp());
  DatanodeDescriptor failedStorageDataNode =
      blockManager.getStoredBlock(blockInfo).getDatanode(0);
  DatanodeDescriptor corruptStorageDataNode =
      blockManager.getStoredBlock(blockInfo).getDatanode(1);
  ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
  for (int i = 0; i < failedStorageDataNode.getStorageInfos().length; i++) {
    DatanodeStorageInfo storageInfo = failedStorageDataNode.getStorageInfos()[i];
    DatanodeStorage dns = new DatanodeStorage(storageInfo.getStorageID(),
        DatanodeStorage.State.FAILED, storageInfo.getStorageType());
    // Hoist the iterator: calling getBlockIterator() in the loop condition
    // and again in the body would create a fresh iterator each time.
    Iterator<BlockInfo> it = storageInfo.getBlockIterator();
    while (it.hasNext()) {
      BlockInfo blockInfo1 = it.next();
      if (blockInfo1.equals(blockInfo)) {
        // Report the storage holding the block as failed.
        StorageReport report = new StorageReport(dns, true,
            storageInfo.getCapacity(), storageInfo.getDfsUsed(),
            storageInfo.getRemaining(), storageInfo.getBlockPoolUsed(), 0L);
        reports.add(report);
        break;
      }
    }
  }
  failedStorageDataNode.updateHeartbeat(
      reports.toArray(StorageReport.EMPTY_ARRAY), 0L, 0L, 0, 0, null);
  ns.writeLock();
  // Find the storage on the second datanode that holds the block, so its
  // replica can be marked corrupt.
  DatanodeStorageInfo corruptStorageInfo = null;
  for (int i = 0; i < corruptStorageDataNode.getStorageInfos().length; i++) {
    corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
    boolean found = false;
    Iterator<BlockInfo> it = corruptStorageInfo.getBlockIterator();
    while (it.hasNext()) {
      if (it.next().equals(blockInfo)) {
        found = true;
        break;
      }
    }
    if (found) {
      break;
    }
  }
  blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
      corruptStorageInfo.getStorageID(), CorruptReplicasMap.Reason.ANY.toString());
  ns.writeUnlock();
  BlockInfo[] blockInfos = new BlockInfo[] { blockInfo };
  ns.readLock();
  LocatedBlocks locatedBlocks = blockManager.createLocatedBlocks(
      blockInfos, 3L, false, 0L, 3L, false, false, null, null);
  assertEquals("Located blocks should exclude corrupt replicas and failed storages",
      1, locatedBlocks.getLocatedBlocks().size());
  ns.readUnlock();
}
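To make the exclusion explicit, the final assertion could be extended to inspect the returned locations directly. A hedged sketch continuing from the variables above (not part of the original test; getDatanodeUuid() is inherited from DatanodeInfo, which DatanodeDescriptor extends):

// Sketch: the returned block's locations should name neither the
// failed-storage DataNode nor the corrupt-replica DataNode.
LocatedBlock lb = locatedBlocks.get(0);
for (DatanodeInfo loc : lb.getLocations()) {
  assertNotEquals(failedStorageDataNode.getDatanodeUuid(), loc.getDatanodeUuid());
  assertNotEquals(corruptStorageDataNode.getDatanodeUuid(), loc.getDatanodeUuid());
}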
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestBlockManager, method testStorageWithRemainingCapacity.
/**
 * Tests that the NameNode does not place a block on a DataNode whose
 * storages lack the remaining capacity to hold it.
 */
@Test
public void testStorageWithRemainingCapacity() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = FileSystem.get(conf);
  Path file1 = null;
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg = InternalDataNodeTestUtils
        .getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
    // Leave each storage with only 64k of remaining capacity, then try to
    // create a file with a 100k block.
    for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
      storage.setUtilizationForTesting(65536, 0, 65536, 0);
    }
    // Sum of the remaining capacity of both storages.
    dd.setRemaining(131072);
    file1 = new Path("testRemainingStorage.dat");
    try {
      DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1,
          0x1BAD5EED);
    } catch (RemoteException re) {
      GenericTestUtils.assertExceptionContains("nodes instead of "
          + "minReplication", re);
    }
  } finally {
    // Clean up.
    assertTrue(fs.exists(file1));
    fs.delete(file1, true);
    assertTrue(!fs.exists(file1));
    cluster.shutdown();
  }
}
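The numbers in this test are worth spelling out (a reading of the values above, not code from the Hadoop source): the file is written as a single 100 KiB block, but each of the DataNode's two storages is left with only 64 KiB remaining, so no single storage can host the block even though the aggregate remaining capacity exceeds the file size.

// Capacity arithmetic behind the expected RemoteException (illustrative only;
// the variable names are hypothetical).
long perStorageRemaining = 65536;                   // 64 KiB per storage
long aggregateRemaining = 2 * perStorageRemaining;  // 131072, as set via setRemaining
long blockSize = 102400;                            // one 100 KiB block
assert blockSize > perStorageRemaining; // no single storage fits the block,
assert blockSize < aggregateRemaining;  // even though aggregate space would suffice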
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestPendingReconstruction, method testProcessPendingReconstructions.
/* Test that processPendingReconstructions will use the most recent
 * blockinfo from the blocksMap, by placing a larger genstamp into
 * the blocksMap.
 */
@Test
public void testProcessPendingReconstructions() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
  MiniDFSCluster cluster = null;
  Block block;
  BlockInfo blockInfo;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    BlockManager blkManager = fsn.getBlockManager();
    PendingReconstructionBlocks pendingReconstruction = blkManager.pendingReconstruction;
    LowRedundancyBlocks neededReconstruction = blkManager.neededReconstruction;
    BlocksMap blocksMap = blkManager.blocksMap;
    // Add 1 block to pendingReconstructions with GenerationStamp = 0.
    block = new Block(1, 1, 0);
    blockInfo = new BlockInfoContiguous(block, (short) 3);
    pendingReconstruction.increment(blockInfo,
        DatanodeStorageInfo.toDatanodeDescriptors(
            DFSTestUtil.createDatanodeStorageInfos(1)));
    BlockCollection bc = Mockito.mock(BlockCollection.class);
    // Place it into the blocksMap with GenerationStamp = 1.
    blockInfo.setGenerationStamp(1);
    blocksMap.addBlockCollection(blockInfo, bc);
    assertEquals("Size of pendingReconstructions ", 1, pendingReconstruction.size());
    // Add a second block to pendingReconstructions that has no
    // corresponding entry in the blocksMap.
    block = new Block(2, 2, 0);
    blockInfo = new BlockInfoContiguous(block, (short) 3);
    pendingReconstruction.increment(blockInfo,
        DatanodeStorageInfo.toDatanodeDescriptors(
            DFSTestUtil.createDatanodeStorageInfos(1)));
    // Verify 2 blocks in pendingReconstructions.
    assertEquals("Size of pendingReconstructions ", 2, pendingReconstruction.size());
    // Wait for the pending entries to time out.
    while (pendingReconstruction.size() > 0) {
      try {
        Thread.sleep(100);
      } catch (Exception e) {
        // Ignore and keep polling.
      }
    }
    // Wait for the timed-out block to move into neededReconstruction.
    while (neededReconstruction.size() == 0) {
      try {
        Thread.sleep(100);
      } catch (Exception e) {
        // Ignore and keep polling.
      }
    }
    // Verify that the generation stamp scheduled for reconstruction is the
    // newer one (1) from the blocksMap, not the stale 0 from the pending entry.
    for (Block b : neededReconstruction) {
      assertEquals("Generation stamp is 1 ", 1, b.getGenerationStamp());
    }
    // Verify that the size of neededReconstruction is exactly 1: the block
    // with no blocksMap entry was dropped rather than re-queued.
    assertEquals("size of neededReconstruction is 1 ", 1, neededReconstruction.size());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
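The two open-coded polling loops could be tightened with Hadoop's GenericTestUtils.waitFor helper, which bounds the wait and fails with a timeout instead of hanging forever. A sketch under the assumption that TIMEOUT is the pending-reconstruction timeout in seconds, as configured above:

// Equivalent bounded waits (sketch, not in the original test): poll every
// 100 ms, give up after twice the configured pending-reconstruction timeout.
GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0,
    100, (int) TIMEOUT * 2 * 1000);
GenericTestUtils.waitFor(() -> neededReconstruction.size() > 0,
    100, (int) TIMEOUT * 2 * 1000);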