Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
In the class TestIncrementalBlockReports, the method injectBlockReceived:
/**
* Inject a fake 'received' block into the BPServiceActor state.
*/
private void injectBlockReceived() {
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      getDummyBlock(), BlockStatus.RECEIVED_BLOCK, null);
  DatanodeStorage s = singletonDn.getFSDataset().getStorage(storageUuid);
  actor.getIbrManager().notifyNamenodeBlock(rdbi, s, false);
}
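Here the DatanodeStorage is looked up from the datanode's live FsDataset. For context, in a test a DatanodeStorage can also be constructed directly; the following is a minimal sketch using the three-argument constructor and DatanodeStorage.generateUuid(), with an illustrative (not prescribed) state and storage type:

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

// Build a standalone storage descriptor with a fresh "DS-..." UUID,
// marked healthy and backed by ordinary disk.
DatanodeStorage storage = new DatanodeStorage(
    DatanodeStorage.generateUuid(),
    DatanodeStorage.State.NORMAL,
    StorageType.DISK);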
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
In the class TestIncrementalBrVariations, the method verifyIncrementalBlockReports:
public void verifyIncrementalBlockReports(boolean splitReports) throws IOException {
  // Get the block list for the file with the block locations.
  LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());

  try (FsDatasetSpi.FsVolumeReferences volumes =
      dn0.getFSDataset().getFsVolumeReferences()) {
    // We will send 'fake' incremental block reports to the NN that look
    // like they originated from DN 0.
    StorageReceivedDeletedBlocks[] reports =
        new StorageReceivedDeletedBlocks[volumes.size()];

    // Lie to the NN that one block on each storage has been deleted.
    for (int i = 0; i < reports.length; ++i) {
      FsVolumeSpi volume = volumes.get(i);

      boolean foundBlockOnStorage = false;
      ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
      // Find the first block on this storage and mark it as deleted for
      // the report.
      for (LocatedBlock block : blocks.getLocatedBlocks()) {
        if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
          rdbi[0] = new ReceivedDeletedBlockInfo(
              block.getBlock().getLocalBlock(),
              ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
          foundBlockOnStorage = true;
          break;
        }
      }

      assertTrue(foundBlockOnStorage);
      reports[i] = new StorageReceivedDeletedBlocks(
          new DatanodeStorage(volume.getStorageID()), rdbi);

      if (splitReports) {
        // If we are splitting reports, then send the report for this
        // storage now.
        StorageReceivedDeletedBlocks[] singletonReport = { reports[i] };
        cluster.getNameNodeRpc().blockReceivedAndDeleted(
            dn0Reg, poolId, singletonReport);
      }
    }

    if (!splitReports) {
      // Send a combined report.
      cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
    }

    // Make sure that the deleted block from each storage was picked up
    // by the NameNode. IBRs are async, make sure the NN processes
    // all of them.
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    assertThat(cluster.getNamesystem().getMissingBlocksCount(),
        is((long) reports.length));
  }
}
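The same per-storage report structure covers block arrivals as well as deletions. A minimal sketch, reusing only the constructors already shown above and assuming a block and volume in scope as in the loop body, that reports one block as received instead of deleted:

// Report a single block as received on one storage ('block' and 'volume'
// are assumed to be in scope, as inside the loop above).
ReceivedDeletedBlockInfo received = new ReceivedDeletedBlockInfo(
    block.getBlock().getLocalBlock(),
    ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
    null);  // no delete hint
StorageReceivedDeletedBlocks receivedReport = new StorageReceivedDeletedBlocks(
    new DatanodeStorage(volume.getStorageID()),
    new ReceivedDeletedBlockInfo[] { received });
cluster.getNameNodeRpc().blockReceivedAndDeleted(
    dn0Reg, poolId, new StorageReceivedDeletedBlocks[] { receivedReport });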