Use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.
The class TestDnRespectsBlockReportSplitThreshold, method testCornerCaseUnderThreshold.
/**
 * Tests the behavior when the count of blocks is exactly one less than
 * the threshold.
 */
@Test(timeout = 300000)
public void testCornerCaseUnderThreshold() throws IOException, InterruptedException {
    startUpCluster(BLOCKS_IN_FILE + 1);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Create a file with a few blocks.
    createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy =
        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a block report so there is an interaction with the spy object.
    DataNodeTestUtils.triggerBlockReport(dn);
    ArgumentCaptor<StorageBlockReport[]> captor =
        ArgumentCaptor.forClass(StorageBlockReport[].class);
    Mockito.verify(nnSpy, times(1)).blockReport(
        any(DatanodeRegistration.class), anyString(), captor.capture(),
        Mockito.<BlockReportContext>anyObject());
    verifyCapturedArguments(captor, cluster.getStoragesPerDatanode(), BLOCKS_IN_FILE);
}
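For context, the split behavior this corner case exercises is governed by dfs.blockreport.split.threshold: once a DataNode holds more blocks than the threshold, it reports each storage in a separate RPC. The startUpCluster helper is not reproduced on this page, so the following is only a plausible sketch of it, assuming a single-DataNode MiniDFSCluster whose threshold is pinned to the given block count:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical helper modeled on the calls above: pin the split threshold,
// then start (and wait for) a one-DataNode cluster.
private void startUpCluster(long splitThreshold) throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY, splitThreshold);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
}

With BLOCKS_IN_FILE + 1 as the threshold, the file's BLOCKS_IN_FILE blocks stay one short of it, so the verify above expects exactly one combined blockReport RPC covering every storage.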
Use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.
The class TestDnRespectsBlockReportSplitThreshold, method testCornerCaseAtThreshold.
/**
 * Tests the behavior when the count of blocks is exactly equal to the
 * threshold.
 */
@Test(timeout = 300000)
public void testCornerCaseAtThreshold() throws IOException, InterruptedException {
    startUpCluster(BLOCKS_IN_FILE);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Create a file with a few blocks.
    createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy =
        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a block report so there is an interaction with the spy object.
    DataNodeTestUtils.triggerBlockReport(dn);
    ArgumentCaptor<StorageBlockReport[]> captor =
        ArgumentCaptor.forClass(StorageBlockReport[].class);
    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
        any(DatanodeRegistration.class), anyString(), captor.capture(),
        Mockito.<BlockReportContext>anyObject());
    verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
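verifyCapturedArguments is likewise not shown here. Judging from how the two tests call it, it presumably checks that each captured call carries the expected number of per-storage reports and that the block counts across all captured calls sum to the expected total; the sketch below is an assumption along those lines, not the actual helper:

import static org.junit.Assert.assertEquals;

// Hypothetical reconstruction of verifyCapturedArguments(captor, reportsPerCall, totalBlocks).
private void verifyCapturedArguments(ArgumentCaptor<StorageBlockReport[]> captor,
        int expectedReportsPerCall, int expectedTotalBlockCount) {
    long blockCount = 0;
    for (StorageBlockReport[] reports : captor.getAllValues()) {
        assertEquals(expectedReportsPerCall, reports.length);
        for (StorageBlockReport report : reports) {
            blockCount += report.getBlocks().getNumberOfBlocks();
        }
    }
    assertEquals(expectedTotalBlockCount, blockCount);
}

Under that reading, the under-threshold test sees one call carrying getStoragesPerDatanode() reports, while the at-threshold test sees getStoragesPerDatanode() calls carrying one report each, with BLOCKS_IN_FILE blocks in total either way.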
Use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.
The class TestNNHandlesBlockReportPerStorage, method sendBlockReports.
@Override
protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
        StorageBlockReport[] reports) throws IOException {
    int i = 0;
    for (StorageBlockReport report : reports) {
        LOG.info("Sending block report for storage " + report.getStorage().getStorageID());
        StorageBlockReport[] singletonReport = { report };
        cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport,
            new BlockReportContext(reports.length, i, System.nanoTime(), 0L, true));
        i++;
    }
}
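This override feeds the NameNode one storage per RPC, with the first two BlockReportContext arguments carrying the total RPC count (reports.length) and the current index so the NameNode can tell when the split report is complete. For contrast, a combined variant in the spirit of the sibling test TestNNHandlesCombinedBlockReport (the body below is an assumption, not that file's code) would send every storage in a single call:

// Assumed combined-report counterpart: every storage in one RPC, so the
// context describes a batch of exactly one report message.
@Override
protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
        StorageBlockReport[] reports) throws IOException {
    LOG.info("Sending combined block reports for " + dnR);
    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports,
        new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
}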
Use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.
The class TestDNFencing, method testRBWReportArrivesAfterEdits.
/**
 * Another regression test for HDFS-2742. This tests the following sequence:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - The block report is delayed in reaching the standby.
 * - The file is closed.
 * - The standby processes the OP_ADD and OP_CLOSE operations before
 *   the RBW block report arrives.
 * - The standby should not mark the block as corrupt.
 */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
    final CountDownLatch brFinished = new CountDownLatch(1);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
        @Override
        protected Object passThrough(InvocationOnMock invocation) throws Throwable {
            try {
                return super.passThrough(invocation);
            } finally {
                // Inform the test that our block report went through.
                brFinished.countDown();
            }
        }
    };
    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
        AppendTestUtil.write(out, 0, 10);
        out.hflush();
        DataNode dn = cluster.getDataNodes().get(0);
        DatanodeProtocolClientSideTranslatorPB spy =
            InternalDataNodeTestUtils.spyOnBposToNN(dn, nn2);
        Mockito.doAnswer(delayer).when(spy).blockReport(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.anyString(),
            Mockito.<StorageBlockReport[]>anyObject(),
            Mockito.<BlockReportContext>anyObject());
        dn.scheduleAllBlockReport(0);
        delayer.waitForCall();
    } finally {
        IOUtils.closeStream(out);
    }
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    delayer.proceed();
    brFinished.await();
    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
    DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
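The test hinges on GenericTestUtils.DelayAnswer, which parks the spied blockReport call until the failover completes. Only waitForCall, proceed, and passThrough are visible above; the two-latch handshake sketched below is an assumed, condensed stand-in for those internals:

import java.util.concurrent.CountDownLatch;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

// Minimal sketch of a DelayAnswer-like gate (assumed internals): the stubbed
// call announces its arrival, then blocks until the test calls proceed().
class SimpleDelayAnswer implements Answer<Object> {
    private final CountDownLatch fireLatch = new CountDownLatch(1);
    private final CountDownLatch waitLatch = new CountDownLatch(1);

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
        fireLatch.countDown();              // lets waitForCall() return
        waitLatch.await();                  // blocks until proceed()
        return invocation.callRealMethod(); // the passThrough() step
    }

    void waitForCall() throws InterruptedException { fireLatch.await(); }
    void proceed() { waitLatch.countDown(); }
}

This is why the RBW report only reaches nn2 after the transitionToActive call: the report is captured before the failover but delivered after it, reproducing exactly the HDFS-2742 sequence.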
Use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.
The class TestBlockListAsLongs, method testDatanodeDetect.
@Test
public void testDatanodeDetect() throws ServiceException, IOException {
    final AtomicReference<BlockReportRequestProto> request = new AtomicReference<>();
    // Just capture the outgoing PB.
    DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
    doAnswer(new Answer<BlockReportResponseProto>() {
        public BlockReportResponseProto answer(InvocationOnMock invocation) {
            Object[] args = invocation.getArguments();
            request.set((BlockReportRequestProto) args[1]);
            return BlockReportResponseProto.newBuilder().build();
        }
    }).when(mockProxy).blockReport(any(RpcController.class), any(BlockReportRequestProto.class));
    @SuppressWarnings("resource")
    DatanodeProtocolClientSideTranslatorPB nn =
        new DatanodeProtocolClientSideTranslatorPB(mockProxy);
    DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
    NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
    reg.setNamespaceInfo(nsInfo);
    Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
    BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
    DatanodeStorage storage = new DatanodeStorage("s1");
    StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
    // Check that the DN sends a new-style (buffer-based) block report.
    request.set(null);
    nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
    nn.blockReport(reg, "pool", sbr,
        new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    BlockReportRequestProto proto = request.get();
    assertNotNull(proto);
    assertTrue(proto.getReports(0).getBlocksList().isEmpty());
    assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
    // Fall back to the prior version and check that the DN sends an
    // old-style (longs-based) block report.
    request.set(null);
    nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
    BlockListAsLongs blockList = getBlockList(r);
    StorageBlockReport[] obp = new StorageBlockReport[] {
        new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
    nn.blockReport(reg, "pool", obp,
        new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    proto = request.get();
    assertNotNull(proto);
    assertFalse(proto.getReports(0).getBlocksList().isEmpty());
    assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
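The getBlockList helper used for the old-style report is not included on this page. A plausible sketch, assuming it lays the replicas out in the legacy longs format (finalized count, under-construction count, then a blockId/length/genstamp triplet per replica) and decodes that list so the resulting BlockListAsLongs is longs-backed rather than buffer-backed:

import java.util.ArrayList;
import java.util.List;

// Assumed shape of getBlockList(Replica...): encode in the legacy long-triplet
// layout, then decode into a longs-backed BlockListAsLongs.
private BlockListAsLongs getBlockList(Replica... replicas) {
    List<Long> blockList = new ArrayList<>();
    blockList.add((long) replicas.length); // number of finalized blocks
    blockList.add(0L);                     // number of under-construction blocks
    for (Replica replica : replicas) {
        blockList.add(replica.getBlockId());
        blockList.add(replica.getBytesOnDisk());
        blockList.add(replica.getGenerationStamp());
    }
    return BlockListAsLongs.decodeLongs(blockList);
}

Fed to the translator with Capability.UNKNOWN set, such a report serializes into getBlocksList() rather than getBlocksBuffersList(), which is exactly what the final assertions check.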