Use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.
The class DatanodeProtocolServerSideTranslatorPB, method blockReceivedAndDeleted:
@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
    RpcController controller, BlockReceivedAndDeletedRequestProto request)
    throws ServiceException {
  List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
  StorageReceivedDeletedBlocks[] info =
      new StorageReceivedDeletedBlocks[sBlocks.size()];
  for (int i = 0; i < sBlocks.size(); i++) {
    StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
    List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
    ReceivedDeletedBlockInfo[] rdBlocks =
        new ReceivedDeletedBlockInfo[list.size()];
    for (int j = 0; j < list.size(); j++) {
      rdBlocks[j] = PBHelper.convert(list.get(j));
    }
    // Prefer the full DatanodeStorage from the proto; fall back to the
    // legacy storage UUID field for reports from older datanodes.
    if (sBlock.hasStorage()) {
      info[i] = new StorageReceivedDeletedBlocks(
          PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
    } else {
      info[i] = new StorageReceivedDeletedBlocks(
          new DatanodeStorage(sBlock.getStorageUuid()), rdBlocks);
    }
  }
  try {
    impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), info);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
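This translator is the server-side decoding step of the incremental block report RPC: it rebuilds the per-storage StorageReceivedDeletedBlocks array from the protobuf request before delegating to the NameNode-side implementation. Below is a minimal sketch, not taken from the Hadoop source, of assembling the same structure by hand; singleBlockReport is a hypothetical helper, and the ReceivedDeletedBlockInfo constructor arguments (block, status, deletion hint) are stated here as an assumption about the Hadoop API rather than confirmed by this page.

  // Hypothetical helper (sketch): build a one-entry incremental block
  // report for a single storage.
  static StorageReceivedDeletedBlocks singleBlockReport(
      String storageUuid, long blockId, long numBytes, long genStamp) {
    // One RECEIVED_BLOCK entry; the third constructor argument is the
    // deletion hint, which only matters for DELETED_BLOCK entries.
    ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
        new Block(blockId, numBytes, genStamp),
        ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null);
    return new StorageReceivedDeletedBlocks(
        new DatanodeStorage(storageUuid),
        new ReceivedDeletedBlockInfo[] { info });
  }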
Use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.
The class IncrementalBlockReportManager, method generateIBRs:
private synchronized StorageReceivedDeletedBlocks[] generateIBRs() {
  final List<StorageReceivedDeletedBlocks> reports =
      new ArrayList<>(pendingIBRs.size());
  for (Map.Entry<DatanodeStorage, PerStorageIBR> entry : pendingIBRs.entrySet()) {
    final PerStorageIBR perStorage = entry.getValue();
    // Send newly-received and deleted blockids to namenode
    final ReceivedDeletedBlockInfo[] rdbi = perStorage.removeAll();
    if (rdbi != null) {
      reports.add(new StorageReceivedDeletedBlocks(entry.getKey(), rdbi));
    }
  }
  readyToSend = false;
  return reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]);
}
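generateIBRs() drains the per-storage pending queues under the manager's lock and resets readyToSend; a caller then ships the resulting array to the NameNode over DatanodeProtocol. The following is a hedged sketch of that send path, assuming a putMissing helper that re-queues reports on failure the way the real IncrementalBlockReportManager does; the actual sendIBRs also records metrics and is not reproduced here.

  // Sketch of the consumer side of generateIBRs(); error handling simplified.
  void sendIBRs(DatanodeProtocol namenode, DatanodeRegistration registration,
      String bpid) throws IOException {
    final StorageReceivedDeletedBlocks[] reports = generateIBRs();
    if (reports.length == 0) {
      return; // nothing pending for any storage
    }
    boolean success = false;
    try {
      // The RPC decoded by the server-side translator in the first snippet.
      namenode.blockReceivedAndDeleted(registration, bpid, reports);
      success = true;
    } finally {
      if (!success) {
        // Re-queue the reports so the next heartbeat cycle retries them
        // (assumed helper, mirroring putMissing in the real manager).
        putMissing(reports);
      }
    }
  }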
Use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.
The class TestAddStripedBlocks, method testCheckStripedReplicaCorrupt:
@Test
public void testCheckStripedReplicaCorrupt() throws Exception {
  final int numBlocks = 4;
  final int numStripes = 4;
  final Path filePath = new Path("/corrupt");
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final BlockManager bm = ns.getBlockManager();
  DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
      numStripes, false);
  INodeFile fileNode = ns.getFSDirectory()
      .getINode(filePath.toString()).asFile();
  Assert.assertTrue(fileNode.isStriped());
  BlockInfo stored = fileNode.getBlocks()[0];
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(0, ns.getCorruptReplicaBlocks());

  // Now send a block report with correct size. The group ID itself is
  // internal (data) block 0 of the striped group.
  DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
  final Block reported = new Block(stored);
  reported.setNumBytes(numStripes * cellSize);
  StorageReceivedDeletedBlocks[] reports =
      DFSTestUtil.makeReportForReceivedBlock(reported,
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(0, ns.getCorruptReplicaBlocks());

  // Now send a block report with wrong size for data block 1
  // (internal block i has ID stored.getBlockId() + i).
  reported.setBlockId(stored.getBlockId() + 1);
  reported.setNumBytes(numStripes * cellSize - 1);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported,
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(1).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());

  // Now send a parity block report with correct size
  // (stored.getBlockId() + dataBlocks is the first parity block).
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes(numStripes * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported,
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());

  // Now send a parity block report with wrong size
  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes(numStripes * cellSize + 1);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported,
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  // The total number of corrupt block groups is still 1
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  // 2 internal blocks corrupted
  Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());

  // Now change the size of stored block, and test verifying the last
  // block size
  stored.setNumBytes(stored.getNumBytes() + 10);
  reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
  reported.setNumBytes(numStripes * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported,
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(4).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());

  // Now send reports with the correct sizes based on the adjusted size
  // of the stored block, which now has numStripes full stripes + a cell
  // + 10 bytes.
  stored.setNumBytes(stored.getNumBytes() + cellSize);
  reported.setBlockId(stored.getBlockId());
  reported.setNumBytes((numStripes + 1) * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported,
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());

  reported.setBlockId(stored.getBlockId() + 1);
  reported.setNumBytes(numStripes * cellSize + 10);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported,
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());

  reported.setBlockId(stored.getBlockId() + dataBlocks);
  reported.setNumBytes((numStripes + 1) * cellSize);
  reports = DFSTestUtil.makeReportForReceivedBlock(reported,
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
  ns.processIncrementalBlockReport(
      cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
  BlockManagerTestUtil.updateState(ns.getBlockManager());
  Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
  Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
}
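The test leans on two invariants of striped block groups: internal block i of a group has ID stored.getBlockId() + i (indices 0..dataBlocks-1 are data blocks, dataBlocks and up are parity), and each internal block has one expected length derived from the group size. The sketch below re-derives those expected lengths standalone; it is an assumption-labeled reconstruction consistent with the assertions above, not the Hadoop implementation (StripedBlockUtil.getInternalBlockLength is the real utility).

  // Sketch: expected length of internal block idxInGroup for a striped
  // group of groupSize bytes. Bytes fill cellSize cells round-robin over
  // the data blocks; a parity cell in the last (partial) stripe is as
  // long as that stripe's first data cell.
  static long internalBlockLength(long groupSize, int cellSize,
      int dataBlocks, int idxInGroup) {
    final long stripeSize = (long) cellSize * dataBlocks;
    final long base = (groupSize / stripeSize) * cellSize; // full stripes
    final long remainder = groupSize % stripeSize;         // partial stripe
    if (idxInGroup >= dataBlocks) {
      // Parity block: one cell per stripe; the last cell mirrors the
      // first data cell of the partial stripe.
      return remainder == 0 ? base : base + Math.min(remainder, cellSize);
    }
    // Data block: the partial stripe's bytes fill cells from block 0 up.
    final long extra = Math.min(
        Math.max(remainder - (long) idxInGroup * cellSize, 0), cellSize);
    return base + extra;
  }

With groupSize = numStripes * cellSize * dataBlocks + cellSize + 10, this gives (numStripes + 1) * cellSize for data block 0, numStripes * cellSize + 10 for data block 1, and (numStripes + 1) * cellSize for each parity block, matching the final three reports the test treats as correct.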