use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestFsck method testFsckWithDecommissionedReplicas.
/**
 * Test that blocks on decommissioning hosts are not shown as missing.
 */
@Test
public void testFsckWithDecommissionedReplicas() throws Exception {
  final short replFactor = 1;
  short numDn = 2;
  final long blockSize = 512;
  final long fileSize = 1024;
  boolean checkDecommissionInProgress = false;
  String[] racks = { "/rack1", "/rack2" };
  String[] hosts = { "host1", "host2" };
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  DistributedFileSystem dfs;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn)
      .hosts(hosts).racks(racks).build();
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName(getClass().getSimpleName()).setNumFiles(1).build();
  // create files
  final String testFile = new String("/testfile");
  final Path path = new Path(testFile);
  util.createFile(dfs, path, fileSize, replFactor, 1000L);
  util.waitReplication(dfs, path, replFactor);
  // make sure datanode that has replica is fine before decommission
  String outStr = runFsck(conf, 0, true, testFile);
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  // decommission datanode
  FSNamesystem fsn = cluster.getNameNode().getNamesystem();
  BlockManager bm = fsn.getBlockManager();
  ExtendedBlock eb = util.getFirstBlock(dfs, path);
  BlockCollection bc = null;
  try {
    fsn.writeLock();
    BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
    bc = fsn.getBlockCollection(bi);
  } finally {
    fsn.writeUnlock();
  }
  DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
  bm.getDatanodeManager().getDecomManager().startDecommission(dn);
  String dnName = dn.getXferAddr();
  // wait for decommission start
  DatanodeInfo datanodeInfo = null;
  int count = 0;
  do {
    Thread.sleep(2000);
    for (DatanodeInfo info : dfs.getDataNodeStats()) {
      if (dnName.equals(info.getXferAddr())) {
        datanodeInfo = info;
      }
    }
    // check the replica status should be healthy(0)
    // instead of corruption (1) during decommissioning
    if (!checkDecommissionInProgress && datanodeInfo != null
        && datanodeInfo.isDecommissionInProgress()) {
      String fsckOut = runFsck(conf, 0, true, testFile);
      checkDecommissionInProgress = true;
    }
  } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
  // check the replica status should be healthy(0) after decommission
  // is done
  String fsckOut = runFsck(conf, 0, true, testFile);
}
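The loop above polls with a fixed two-second sleep until the node reports as decommissioned. A more compact way to express the same wait, sketched below, is a predicate passed to GenericTestUtils.waitFor from org.apache.hadoop.test, which is widely used in Hadoop's own tests; the helper name waitForDecommission is hypothetical and not part of TestFsck.

// Hypothetical helper: wait until the datanode with the given transfer address
// reports as decommissioned. Assumes GenericTestUtils.waitFor(check,
// checkEveryMillis, waitForMillis) is available on the test classpath.
private static void waitForDecommission(final DistributedFileSystem dfs,
    final String dnXferAddr) throws Exception {
  GenericTestUtils.waitFor(() -> {
    try {
      for (DatanodeInfo info : dfs.getDataNodeStats()) {
        if (dnXferAddr.equals(info.getXferAddr())) {
          return info.isDecommissioned();
        }
      }
    } catch (IOException e) {
      // treat a transient RPC failure as "not yet decommissioned"
      return false;
    }
    return false;
  }, 2000, 60000);
}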
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestReconstructStripedBlocks method doTestMissingStripedBlock.
/**
 * Start GROUP_SIZE + 1 datanodes.
 * Inject striped blocks into the first GROUP_SIZE datanodes.
 * Then make numOfBusy datanodes busy and make numOfMissed datanodes missing.
 * Then trigger BlockManager to compute reconstruction work (so all
 * reconstruction work will be scheduled on the last datanode).
 * Finally, verify the reconstruction work of the last datanode.
 */
private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy)
    throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1)
      .build();
  try {
    cluster.waitActive();
    final int numBlocks = 4;
    DFSTestUtil.createStripedFile(cluster, filePath, dirPath, numBlocks, 1,
        true);
    // all blocks will be located at first GROUP_SIZE DNs, the last DN is
    // empty because of the util function createStripedFile
    // make sure the file is complete in NN
    final INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
        .getINode4Write(filePath.toString()).asFile();
    assertFalse(fileNode.isUnderConstruction());
    assertTrue(fileNode.isStriped());
    BlockInfo[] blocks = fileNode.getBlocks();
    assertEquals(numBlocks, blocks.length);
    for (BlockInfo blk : blocks) {
      assertTrue(blk.isStriped());
      assertTrue(blk.isComplete());
      assertEquals(cellSize * dataBlocks, blk.getNumBytes());
      final BlockInfoStriped sb = (BlockInfoStriped) blk;
      assertEquals(groupSize, sb.numNodes());
    }
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    BlockInfo firstBlock = fileNode.getBlocks()[0];
    DatanodeStorageInfo[] storageInfos = bm.getStorages(firstBlock);
    // make numOfBusy nodes busy
    int i = 0;
    for (; i < numOfBusy; i++) {
      DatanodeDescriptor busyNode = storageInfos[i].getDatanodeDescriptor();
      for (int j = 0; j < maxReplicationStreams + 1; j++) {
        BlockManagerTestUtil.addBlockToBeReplicated(busyNode, new Block(j),
            new DatanodeStorageInfo[] { storageInfos[0] });
      }
    }
    // make numOfMissed internal blocks missed
    for (; i < numOfBusy + numOfMissed; i++) {
      DatanodeDescriptor missedNode = storageInfos[i].getDatanodeDescriptor();
      assertEquals(numBlocks, missedNode.numBlocks());
      bm.getDatanodeManager().removeDatanode(missedNode);
    }
    BlockManagerTestUtil.getComputedDatanodeWork(bm);
    // all the reconstruction work will be scheduled on the last DN
    DataNode lastDn = cluster.getDataNodes().get(groupSize);
    DatanodeDescriptor last =
        bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
    assertEquals("Counting the number of outstanding EC tasks", numBlocks,
        last.getNumberOfBlocksToBeErasureCoded());
    List<BlockECReconstructionInfo> reconstruction =
        last.getErasureCodeCommand(numBlocks);
    for (BlockECReconstructionInfo info : reconstruction) {
      assertEquals(1, info.getTargetDnInfos().length);
      assertEquals(last, info.getTargetDnInfos()[0]);
      assertEquals(info.getSourceDnInfos().length,
          info.getLiveBlockIndices().length);
      if (groupSize - numOfMissed == dataBlocks) {
        // It's a QUEUE_HIGHEST_PRIORITY block, so the busy DNs will be chosen
        // to make sure we have NUM_DATA_BLOCKS DNs to do reconstruction
        // work.
        assertEquals(dataBlocks, info.getSourceDnInfos().length);
      } else {
        // The block has no highest priority, so we don't use the busy DNs as
        // sources
        assertEquals(groupSize - numOfMissed - numOfBusy,
            info.getSourceDnInfos().length);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
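The helper above is private and parameterized by the number of missing and busy datanodes, so it is driven by public test methods. A caller would look roughly like the sketch below; the test method names here are illustrative, not necessarily the exact ones used in TestReconstructStripedBlocks.

// Illustrative callers (hypothetical names): one missing internal block with
// no busy nodes, and one missing internal block while another source is busy.
@Test
public void testMissingBlockOnly() throws Exception {
  doTestMissingStripedBlock(1, 0);
}

@Test
public void testMissingBlockWithBusyNode() throws Exception {
  doTestMissingStripedBlock(1, 1);
}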
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestStripedINodeFile method testDeleteOp.
/**
* Test the behavior of striped and contiguous block deletions.
*/
@Test(timeout = 60000)
public void testDeleteOp() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    final int len = 1024;
    final Path parentDir = new Path("/parentDir");
    final Path ecDir = new Path(parentDir, "ecDir");
    final Path ecFile = new Path(ecDir, "ecFile");
    final Path contiguousFile = new Path(parentDir, "someFile");
    final DistributedFileSystem dfs;
    final Configuration conf = new Configuration();
    final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() +
        testECPolicy.getNumParityUnits());
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
        StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
        .build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    dfs = cluster.getFileSystem();
    dfs.mkdirs(ecDir);
    // set erasure coding policy
    dfs.setErasureCodingPolicy(ecDir,
        StripedFileTestUtil.getDefaultECPolicy().getName());
    DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED);
    DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
    final FSDirectory fsd = fsn.getFSDirectory();
    // Case-1: Verify the behavior of striped blocks
    // Get blocks of striped file
    INode inodeStriped = fsd.getINode("/parentDir/ecDir/ecFile");
    assertTrue("Failed to get INodeFile for /parentDir/ecDir/ecFile",
        inodeStriped instanceof INodeFile);
    INodeFile inodeStripedFile = (INodeFile) inodeStriped;
    BlockInfo[] stripedBlks = inodeStripedFile.getBlocks();
    for (BlockInfo blockInfo : stripedBlks) {
      assertFalse("Mistakenly marked the block as deleted!",
          blockInfo.isDeleted());
    }
    // delete directory with erasure coding policy
    dfs.delete(ecDir, true);
    for (BlockInfo blockInfo : stripedBlks) {
      assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted());
    }
    // Case-2: Verify the behavior of contiguous blocks
    // Get blocks of contiguous file
    INode inode = fsd.getINode("/parentDir/someFile");
    assertTrue("Failed to get INodeFile for /parentDir/someFile",
        inode instanceof INodeFile);
    INodeFile inodeFile = (INodeFile) inode;
    BlockInfo[] contiguousBlks = inodeFile.getBlocks();
    for (BlockInfo blockInfo : contiguousBlks) {
      assertFalse("Mistakenly marked the block as deleted!",
          blockInfo.isDeleted());
    }
    // delete parent directory
    dfs.delete(parentDir, true);
    for (BlockInfo blockInfo : contiguousBlks) {
      assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted());
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
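The test repeats the same per-block assertion loop four times over stripedBlks and contiguousBlks. If one wanted to factor that out, a small helper along the lines below would do; the name assertBlocksDeleted is hypothetical and is not part of TestStripedINodeFile.

// Hypothetical helper: assert that every block of a file has the expected
// deleted state, before and after the delete operation.
private static void assertBlocksDeleted(BlockInfo[] blocks,
    boolean expectDeleted) {
  for (BlockInfo blockInfo : blocks) {
    assertEquals("Unexpected deleted state for block " + blockInfo,
        expectDeleted, blockInfo.isDeleted());
  }
}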
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestFileWithSnapshotFeature method testUpdateQuotaAndCollectBlocks.
@Test
public void testUpdateQuotaAndCollectBlocks() {
  FileDiffList diffs = new FileDiffList();
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
  FileDiff diff = mock(FileDiff.class);
  BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
  BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
  BlockInfo[] blocks = new BlockInfo[] {
      new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1) };
  BlockManager bm = mock(BlockManager.class);
  // No snapshot
  INodeFile file = mock(INodeFile.class);
  when(file.getFileWithSnapshotFeature()).thenReturn(sf);
  when(file.getBlocks()).thenReturn(blocks);
  when(file.getStoragePolicyID()).thenReturn((byte) 1);
  Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
  when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
  when(bsps.getPolicy(anyByte())).thenReturn(bsp);
  INode.BlocksMapUpdateInfo collectedBlocks =
      mock(INode.BlocksMapUpdateInfo.class);
  ArrayList<INode> removedINodes = new ArrayList<>();
  INode.ReclaimContext ctx =
      new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals(0, counts.getStorageSpace());
  Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
  // INode only exists in the snapshot
  INodeFile snapshotINode = mock(INodeFile.class);
  Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
  Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
  when(diff.getSnapshotINode()).thenReturn(snapshotINode);
  when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
  when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
  blocks[0].setReplication(REPL_3);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE, counts.getStorageSpace());
  Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
  Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestSnapshotBlocksMap method assertBlockCollection.
static INodeFile assertBlockCollection(String path, int numBlocks,
    final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for (BlockInfo b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}