Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
The class TestAddOverReplicatedStripedBlocks, method testProcessOverReplicatedSBSmallerThanFullBlocks.
@Test
public void testProcessOverReplicatedSBSmallerThanFullBlocks() throws Exception {
// Create an EC file that is smaller than a full block group, so not all internal blocks are used.
int fileLen = cellSize * (dataBlocks - 1);
byte[] content = new byte[fileLen];
DFSTestUtil.writeFile(fs, filePath, new String(content));
LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
long gs = bg.getBlock().getGenerationStamp();
String bpid = bg.getBlock().getBlockPoolId();
long groupId = bg.getBlock().getBlockId();
Block blk = new Block(groupId, blockSize, gs);
cluster.triggerBlockReports();
List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
// let an internal block be over-replicated with (numDNs - GROUP_SIZE + 1)
// redundant copies, so the total number of internal blocks exceeds GROUP_SIZE.
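// use the group ID itself as the block ID, i.e. the injected replica poses as internal block #0 of the group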
blk.setBlockId(groupId);
List<DataNode> dataNodeList = cluster.getDataNodes();
for (int i = 0; i < numDNs; i++) {
if (!infos.contains(dataNodeList.get(i).getDatanodeId())) {
cluster.injectBlocks(i, Arrays.asList(blk), bpid);
System.out.println("XXX: inject block into datanode " + i);
}
}
// update blocksMap
cluster.triggerBlockReports();
// add to invalidates
cluster.triggerHeartbeats();
// the datanodes delete the block
cluster.triggerHeartbeats();
// update blocksMap
cluster.triggerBlockReports();
// verify that all expected internal blocks exist
lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
}
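These snippets depend on fields set up elsewhere in the test classes (cluster, fs/dfs, filePath, cellSize, dataBlocks, groupSize, numDNs, blockSize), which this page does not show. A minimal sketch of a plausible fixture, using the default EC policy and names matching the snippets; the concrete blockSize and numDNs values are assumptions, and imports are omitted as in the snippets above:
private ErasureCodingPolicy ecPolicy;
private int dataBlocks;
private int parityBlocks;
private int cellSize;
private int groupSize;
private int blockSize;
private int numDNs;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private final Path dirPath = new Path("/striped");
private final Path filePath = new Path(dirPath, "file");

@Before
public void setup() throws IOException {
  ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
  dataBlocks = ecPolicy.getNumDataUnits();
  parityBlocks = ecPolicy.getNumParityUnits();
  cellSize = ecPolicy.getCellSize();
  groupSize = dataBlocks + parityBlocks;
  blockSize = 4 * cellSize;          // small blocks keep the tests fast (assumed value)
  numDNs = groupSize + 3;            // a few spare DataNodes for over-replication (assumed value)
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  fs.mkdirs(dirPath);
  fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
}

@After
public void tearDown() {
  if (cluster != null) {
    cluster.shutdown();
  }
}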
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
The class TestAddStripedBlocks, method testAddUCReplica.
/**
* Test BlockInfoStripedUnderConstruction#addReplicaIfNotPresent in different
* scenarios.
*/
@Test
public void testAddUCReplica() throws Exception {
final Path file = new Path("/file1");
final List<String> storageIDs = new ArrayList<>();
// create an empty file
FSDataOutputStream out = null;
try {
out = dfs.create(file, (short) 1);
// 1. create the UC striped block
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
cluster.getNamesystem().getAdditionalBlock(file.toString(), fileNode.getId(), dfs.getClient().getClientName(), null, null, null, null);
BlockInfo lastBlock = fileNode.getLastBlock();
DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
Assert.assertEquals(groupSize, locs.length);
Assert.assertEquals(groupSize, indices.length);
// 2. mimic incremental block reports and make sure the uc-replica list in
// the BlockInfoUCStriped is correct
int i = 0;
for (DataNode dn : cluster.getDataNodes()) {
final Block block = new Block(lastBlock.getBlockId() + i++, 0, lastBlock.getGenerationStamp());
DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
storageIDs.add(storage.getStorageID());
StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, BlockStatus.RECEIVING_BLOCK, storage);
for (StorageReceivedDeletedBlocks report : reports) {
cluster.getNamesystem().processIncrementalBlockReport(dn.getDatanodeId(), report);
}
}
// make sure lastBlock is correct and the storages have been updated
locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
Assert.assertEquals(groupSize, locs.length);
Assert.assertEquals(groupSize, indices.length);
for (DatanodeStorageInfo newstorage : locs) {
Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
}
} finally {
IOUtils.cleanup(null, out);
}
// 3. restart the namenode. mimic the full block reports and check the
// uc-replica list again
cluster.restartNameNode(true);
final String bpId = cluster.getNamesystem().getBlockPoolId();
INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(file.toString()).asFile();
BlockInfo lastBlock = fileNode.getLastBlock();
int i = groupSize - 1;
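// report the storages in reverse order, so that after the full block reports
// the NN must re-associate each storage with the correct internal block index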
for (DataNode dn : cluster.getDataNodes()) {
String storageID = storageIDs.get(i);
final Block block = new Block(lastBlock.getBlockId() + i--, lastBlock.getGenerationStamp(), 0);
DatanodeStorage storage = new DatanodeStorage(storageID);
List<ReplicaBeingWritten> blocks = new ArrayList<>();
ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null, null);
blocks.add(replica);
BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId), bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
}
DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
Assert.assertEquals(groupSize, locs.length);
Assert.assertEquals(groupSize, indices.length);
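// the storages were reported in reverse order above, so storage i should now
// be associated with internal block (groupSize - 1 - i)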
for (i = 0; i < groupSize; i++) {
Assert.assertEquals(storageIDs.get(i), locs[groupSize - 1 - i].getStorageID());
Assert.assertEquals(groupSize - i - 1, indices[i]);
}
}
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
The class TestAddStripedBlocks, method testBlockScheduledUpdate.
/**
* Check if the scheduled block size on each DN storage is correctly updated
*/
@Test
public void testBlockScheduledUpdate() throws Exception {
final FSNamesystem fsn = cluster.getNamesystem();
final Path foo = new Path("/foo");
try (FSDataOutputStream out = dfs.create(foo, true)) {
DFSStripedOutputStream sout = (DFSStripedOutputStream) out.getWrappedStream();
writeAndFlushStripedOutputStream(sout, DFS_BYTES_PER_CHECKSUM_DEFAULT);
// make sure the scheduled block size has been updated for each DN storage
// in NN
final List<DatanodeDescriptor> dnList = new ArrayList<>();
fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, false);
for (DatanodeDescriptor dn : dnList) {
Assert.assertEquals(1, dn.getBlocksScheduled());
}
}
// we have completed the file; force the DNs to flush their IBRs (incremental block reports)
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(dn);
}
// check the scheduled block size again
final List<DatanodeDescriptor> dnList = new ArrayList<>();
fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, false);
for (DatanodeDescriptor dn : dnList) {
Assert.assertEquals(0, dn.getBlocksScheduled());
}
}
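The helper writeAndFlushStripedOutputStream called above is private to TestAddStripedBlocks and is not shown on this page. A rough sketch of what it presumably does, write at least one checksum chunk and push the striped stream's buffered cells out so the NN records the scheduled block; the use of DFSTestUtil.flushInternal and the exact write size are assumptions:
private void writeAndFlushStripedOutputStream(DFSStripedOutputStream out, int chunkSize) throws IOException {
  // write enough bytes to spill the output summer's buffer (size is an assumption)
  byte[] toWrite = new byte[chunkSize * 9 + 1];
  out.write(toWrite);
  // assumed: DFSTestUtil offers a hook to flush the striped stream's internal cell buffers
  DFSTestUtil.flushInternal(out);
}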
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
The class TestReconstructStripedBlocks, method testCountLiveReplicas.
/**
* Make sure the NN can detect the scenario where there is a sufficient number
* of internal blocks (>= 9 by default) but a data/parity block is still missing.
*/
@Test
public void testCountLiveReplicas() throws Exception {
final HdfsConfiguration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
try {
fs.mkdirs(dirPath);
fs.setErasureCodingPolicy(dirPath, StripedFileTestUtil.getDefaultECPolicy().getName());
DFSTestUtil.createFile(fs, filePath, cellSize * dataBlocks * 2, (short) 1, 0L);
// stop a dn
LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
DatanodeInfo dnToStop = block.getLocations()[0];
MiniDFSCluster.DataNodeProperties dnProp = cluster.stopDataNode(dnToStop.getXferAddr());
cluster.setDataNodeDead(dnToStop);
// wait for reconstruction to happen
DFSTestUtil.waitForReplication(fs, filePath, groupSize, 15 * 1000);
// bring the dn back: 10 internal blocks now
cluster.restartDataNode(dnProp);
cluster.waitActive();
// stop another dn: 9 internal blocks remain, but they only cover 8 distinct ones
dnToStop = block.getLocations()[1];
cluster.stopDataNode(dnToStop.getXferAddr());
cluster.setDataNodeDead(dnToStop);
// currently the namenode is able to track the missing block, but restart the NN anyway
cluster.restartNameNode(true);
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(dn);
}
FSNamesystem fsn = cluster.getNamesystem();
BlockManager bm = fsn.getBlockManager();
// wait for 3 cycles of the redundancy monitor
Thread.sleep(3000);
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerHeartbeat(dn);
}
// check if NN can detect the missing internal block and finish the
// reconstruction
StripedFileTestUtil.waitForReconstructionFinished(filePath, fs, groupSize);
boolean reconstructed = false;
for (int i = 0; i < 5; i++) {
NumberReplicas num = null;
fsn.readLock();
try {
BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory().getINode4Write(filePath.toString()).asFile().getLastBlock();
num = bm.countNodes(blockInfo);
} finally {
fsn.readUnlock();
}
if (num.liveReplicas() >= groupSize) {
reconstructed = true;
break;
} else {
Thread.sleep(1000);
}
}
Assert.assertTrue(reconstructed);
blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
block = (LocatedStripedBlock) blks.getLastLocatedBlock();
BitSet bitSet = new BitSet(groupSize);
for (byte index : block.getBlockIndices()) {
bitSet.set(index);
}
for (int i = 0; i < groupSize; i++) {
Assert.assertTrue(bitSet.get(i));
}
} finally {
cluster.shutdown();
}
}
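The manual retry loop above (five one-second sleeps guarded by the reconstructed flag) could also be written with org.apache.hadoop.test.GenericTestUtils.waitFor, which polls a condition and throws a TimeoutException if it never holds, removing the need for the final assert. A sketch reusing the fsn, bm, filePath and groupSize names from the test; the polling interval and timeout are assumptions:
GenericTestUtils.waitFor(() -> {
  fsn.readLock();
  try {
    BlockInfo blockInfo = fsn.getFSDirectory()
        .getINode4Write(filePath.toString()).asFile().getLastBlock();
    return bm.countNodes(blockInfo).liveReplicas() >= groupSize;
  } catch (Exception e) {
    // treat lookup failures as "not yet reconstructed" and keep polling
    return false;
  } finally {
    fsn.readUnlock();
  }
}, 1000, 10000);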
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
The class TestReconstructStripedBlocks, method getNumberOfBlocksToBeErasureCoded.
private static int getNumberOfBlocksToBeErasureCoded(MiniDFSCluster cluster) throws Exception {
DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
int count = 0;
for (DataNode dn : cluster.getDataNodes()) {
DatanodeDescriptor dd = dm.getDatanode(dn.getDatanodeId());
count += dd.getNumberOfBlocksToBeErasureCoded();
}
return count;
}
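A hypothetical use of this helper (the surrounding assertion is an illustration, not code from the test class):
// after stopping a DataNode and triggering heartbeats, the NN should have
// queued erasure-coding reconstruction work on the surviving DataNodes
cluster.triggerHeartbeats();
Assert.assertTrue("expected pending EC reconstruction work",
    getNumberOfBlocksToBeErasureCoded(cluster) > 0);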