Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class TestBlockManager, method testSafeModeIBRBeforeFirstFullBR.
/**
 * Test that when the NN starts up in safe mode and receives an incremental
 * block report (IBR) before the first full block report, both reports are
 * processed correctly.
 */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();
  DatanodeDescriptor node = nodes.get(0);
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.setAlive(true);
  DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node);
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());

  // Build an incremental report
  List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
  // Build a full report
  BlockListAsLongs.Builder builder = BlockListAsLongs.builder();

  // blk_42 is finalized.
  long receivedBlockId = 42;  // arbitrary
  BlockInfo receivedBlock = addBlockToBM(receivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivedBlock, null, null));

  // blk_43 is under construction.
  long receivingBlockId = 43;
  BlockInfo receivingBlock = addUcBlockToBM(receivingBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));

  // blk_44 has two records in the IBR. It's finalized, so the full BR has
  // a single record for it.
  long receivingReceivedBlockId = 44;
  BlockInfo receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));

  // blk_45 is not in the full BR because it has been deleted.
  long receivedDeletedBlockId = 45;
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));

  // blk_46 has existed on the DN for a long time, so it's in the full BR
  // but not in the IBR.
  long existedBlockId = 46;
  BlockInfo existedBlock = addBlockToBM(existedBlockId);
  builder.add(new FinalizedReplica(existedBlock, null, null));

  // process the IBR and the full BR
  StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(
      new DatanodeStorage(ds.getStorageID()),
      rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
  bm.processIncrementalBlockReport(node, srdb);
  // Make sure it's the first full report
  assertEquals(0, ds.getBlockReportCount());
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      builder.build(),
      new BlockReportContext(1, 0, System.nanoTime(), 0, true));
  assertEquals(1, ds.getBlockReportCount());

  // verify the storage info is correct
  assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo(ds) >= 0);
  assertTrue(bm.getStoredBlock(new Block(receivingBlockId))
      .getUnderConstructionFeature().getNumExpectedLocations() > 0);
  assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertNull(bm.getStoredBlock(new Block(receivedDeletedBlockId)));
  assertTrue(bm.getStoredBlock(new Block(existedBlock)).findStorageInfo(ds) >= 0);
}
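The helpers addBlockToBM and addUcBlockToBM are defined elsewhere in TestBlockManager and are not shown on this page. Below is a minimal sketch of the finalized-block variant, assuming the test class sits in the same package as BlockManager (so the package-private blocksMap is reachable) and that a mocked BlockCollection is sufficient for the report-processing paths exercised above; the project's own helper does additional wiring (for example tying the block to an inode), so treat this as illustrative only.

  // Illustrative sketch: register a finalized block with replication 3 in the
  // BlockManager's BlocksMap so block-report processing can resolve it.
  private BlockInfo addBlockToBM(long blkId) {
    Block block = new Block(blkId);
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
    BlockCollection bc = Mockito.mock(BlockCollection.class);
    bm.blocksMap.addBlockCollection(blockInfo, bc);
    return blockInfo;
  }

The under-construction variant would additionally mark the new BlockInfo as under construction before returning it.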
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class FsDatasetUtil, method computeChecksum.
/**
 * Compute the checksum for a block file that does not already have
 * its checksum computed, and save it to the dstMeta file.
 */
public static void computeChecksum(File srcMeta, File dstMeta, File blockFile,
    int smallBufferSize, Configuration conf) throws IOException {
  Preconditions.checkNotNull(srcMeta);
  Preconditions.checkNotNull(dstMeta);
  Preconditions.checkNotNull(blockFile);
  // Create a dummy ReplicaInfo object pointing to the blockFile.
  ReplicaInfo wrapper = new FinalizedReplica(0, 0, 0, null, null) {
    @Override
    public URI getMetadataURI() {
      return srcMeta.toURI();
    }

    @Override
    public InputStream getDataInputStream(long seekOffset) throws IOException {
      return new FileInputStream(blockFile);
    }
  };
  FsDatasetImpl.computeChecksum(wrapper, dstMeta, smallBufferSize, conf);
}
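A hedged usage sketch of the method above; the file paths are hypothetical and the buffer size is a placeholder. The existing srcMeta file supplies the checksum parameters (its header is read through the wrapper's getMetadataURI), while dstMeta receives the freshly computed checksums.

  Configuration conf = new HdfsConfiguration();
  // hypothetical block and meta file locations on a datanode volume
  File blockFile = new File("/data/dn1/current/finalized/blk_1073741825");
  File srcMeta = new File("/data/dn1/current/finalized/blk_1073741825_1001.meta");
  File dstMeta = new File(srcMeta.getPath() + ".tmp");
  // 512 is only a placeholder; datanode code typically derives this value
  // from the configuration (e.g. DFSUtilClient.getSmallBufferSize(conf))
  FsDatasetUtil.computeChecksum(srcMeta, dstMeta, blockFile, 512, conf);
  // dstMeta now contains checksums recomputed from blockFile's current contents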
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class TestBlockListAsLongs, method testDatanodeDetect.
@Test
public void testDatanodeDetect() throws ServiceException, IOException {
  final AtomicReference<BlockReportRequestProto> request = new AtomicReference<>();

  // just capture the outgoing PB
  DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
  doAnswer(new Answer<BlockReportResponseProto>() {
    @Override
    public BlockReportResponseProto answer(InvocationOnMock invocation) {
      Object[] args = invocation.getArguments();
      request.set((BlockReportRequestProto) args[1]);
      return BlockReportResponseProto.newBuilder().build();
    }
  }).when(mockProxy).blockReport(any(RpcController.class),
      any(BlockReportRequestProto.class));

  @SuppressWarnings("resource")
  DatanodeProtocolClientSideTranslatorPB nn =
      new DatanodeProtocolClientSideTranslatorPB(mockProxy);
  DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
  NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
  reg.setNamespaceInfo(nsInfo);

  Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
  BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
  DatanodeStorage storage = new DatanodeStorage("s1");
  StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };

  // check DN sends new-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
  nn.blockReport(reg, "pool", sbr,
      new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
  BlockReportRequestProto proto = request.get();
  assertNotNull(proto);
  assertTrue(proto.getReports(0).getBlocksList().isEmpty());
  assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());

  // back up to prior version and check DN sends old-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
  BlockListAsLongs blockList = getBlockList(r);
  StorageBlockReport[] obp = new StorageBlockReport[] {
      new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
  nn.blockReport(reg, "pool", obp,
      new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
  proto = request.get();
  assertNotNull(proto);
  assertFalse(proto.getReports(0).getBlocksList().isEmpty());
  assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
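The capability mask set on NamespaceInfo is what drives which encoding the datanode side emits. Below is a sketch of roughly how that decision can be made on the client side, assuming a registration whose NamespaceInfo has been populated as in the test above; the surrounding wiring is illustrative.

  NamespaceInfo ns = reg.getNamespaceInfo();
  boolean useBuffers =
      ns.isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);
  // useBuffers == true  -> ship the pre-encoded block buffers (new-style report)
  // useBuffers == false -> expand the report into the legacy list of longs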
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class TestBlockListAsLongs, method testFinalized.
@Test
public void testFinalized() {
  BlockListAsLongs blocks = checkReport(new FinalizedReplica(b1, null, null));
  assertArrayEquals(new long[] { 1, 0, 1, 11, 111, -1, -1, -1 },
      blocks.getBlockListAsLongs());
}
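To make the expected array easier to read: the legacy long encoding starts with the finalized and under-construction counts, followed by (blockId, length, generationStamp) triplets for finalized replicas and a (-1, -1, -1) delimiter before any under-construction entries. A small sketch that decodes the same longs back (Arrays is java.util.Arrays; the assertions are illustrative):

  BlockListAsLongs decoded = BlockListAsLongs.decodeLongs(
      Arrays.asList(1L, 0L, 1L, 11L, 111L, -1L, -1L, -1L));
  for (BlockListAsLongs.BlockReportReplica replica : decoded) {
    // a single finalized replica: blockId=1, numBytes=11, genStamp=111
    assertEquals(1, replica.getBlockId());
    assertEquals(11, replica.getNumBytes());
    assertEquals(111, replica.getGenerationStamp());
  }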
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class TestBlockListAsLongs, method testFuzz.
@Test
public void testFuzz() throws InterruptedException {
  Replica[] replicas = new Replica[100000];
  Random rand = new Random(0);
  for (int i = 0; i < replicas.length; i++) {
    Block b = new Block(rand.nextLong(), i, i << 4);
    // Use nextInt(3) so that all three replica types below can be generated;
    // with nextInt(2) the ReplicaWaitingToBeRecovered case would be unreachable.
    switch (rand.nextInt(3)) {
      case 0:
        replicas[i] = new FinalizedReplica(b, null, null);
        break;
      case 1:
        replicas[i] = new ReplicaBeingWritten(b, null, null, null);
        break;
      case 2:
        replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
        break;
    }
  }
  checkReport(replicas);
}
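checkReport is a private helper of TestBlockListAsLongs that is not shown on this page. A rough sketch of what such a helper can do is below, assuming Guava's Longs utility is available; the project's own version also exercises the buffer-based encoding and is more thorough, so this is a sketch rather than the real implementation.

  private BlockListAsLongs checkReport(Replica... replicas) {
    Map<Long, Replica> expected = new HashMap<>();
    for (Replica r : replicas) {
      expected.put(r.getBlockId(), r);
    }
    BlockListAsLongs blocks = BlockListAsLongs.encode(expected.values());
    // round-trip through the legacy long encoding and compare field by field
    BlockListAsLongs decoded = BlockListAsLongs.decodeLongs(
        Longs.asList(blocks.getBlockListAsLongs()));
    for (BlockListAsLongs.BlockReportReplica replica : decoded) {
      Replica original = expected.remove(replica.getBlockId());
      assertNotNull(original);
      assertEquals(original.getState(), replica.getState());
      // the report stores the replica's on-disk length
      assertEquals(original.getBytesOnDisk(), replica.getNumBytes());
      assertEquals(original.getGenerationStamp(), replica.getGenerationStamp());
    }
    assertTrue(expected.isEmpty());
    return blocks;
  }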