Use of org.apache.hadoop.hdfs.server.datanode.Replica in the Apache Hadoop project.
Class FsDatasetImplTestUtils, method getStoredReplicas.
/**
 * Returns an iterator over the replicas currently stored on disk for the
 * given block pool, by reloading every volume's replica map from disk
 * (rather than reading the dataset's cached in-memory map).
 *
 * @param bpid block pool id to scan
 * @return iterator over a snapshot of the on-disk replicas
 * @throws IOException if a volume's replica map cannot be loaded
 */
@Override
public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
// Reload replicas from the disk.
ReplicaMap replicaMap = new ReplicaMap(dataset.datasetLock);
try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
for (FsVolumeSpi vol : refs) {
FsVolumeImpl volume = (FsVolumeImpl) vol;
volume.getVolumeMap(bpid, replicaMap, dataset.ramDiskReplicaTracker);
}
}
// Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
// FsVolumeSpi implementation.
// Look up the pool's replicas once instead of twice (null-check + addAll).
java.util.Collection<ReplicaInfo> replicas = replicaMap.replicas(bpid);
List<Replica> ret = new ArrayList<>();
if (replicas != null) {
ret.addAll(replicas);
}
return ret.iterator();
}
Use of org.apache.hadoop.hdfs.server.datanode.Replica in the Apache Hadoop project.
Class FsDatasetImpl, method getBlockLocalPathInfo.
// FsDatasetSpi
/**
 * Returns the local file-system paths (block file and meta file) for the
 * given block, after validating the stored replica's generation stamp
 * against the requested block's.
 *
 * @param block the block to resolve; its generation stamp may be bumped
 *              in place if the on-disk replica is newer
 * @return local path info for the block and its metadata file
 * @throws ReplicaNotFoundException if no replica exists for the block
 * @throws IOException if the stored replica is older than the requested block
 */
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException {
// Validate the replica's generation stamp while holding the dataset lock.
try (AutoCloseableLock lock = datasetLock.acquire()) {
final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
if (replica == null) {
throw new ReplicaNotFoundException(block);
}
// A replica older than the requested block is stale and unusable.
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
throw new IOException("Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica);
} else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
// On-disk replica is newer: update the caller's block to match it.
block.setGenerationStamp(replica.getGenerationStamp());
}
}
// NOTE(review): the dataset lock is released before this second lookup, so
// the replica could change in between — confirm this window is acceptable.
ReplicaInfo r = getBlockReplica(block);
File blockFile = new File(r.getBlockURI());
File metaFile = new File(r.getMetadataURI());
// NOTE(review): block path uses getAbsolutePath() while the meta path uses
// toString() — verify the asymmetry is intentional.
BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString());
return info;
}
Use of org.apache.hadoop.hdfs.server.datanode.Replica in the Apache Hadoop project.
Class TestBlockListAsLongs, method testDatanodeDetect.
/**
 * Verifies that the DataNode-side translator picks the block-report wire
 * format based on the NameNode's advertised capabilities: buffer-based
 * reports when STORAGE_BLOCK_REPORT_BUFFERS is set, and the legacy
 * longs-based encoding otherwise.
 */
@Test
public void testDatanodeDetect() throws ServiceException, IOException {
final AtomicReference<BlockReportRequestProto> request = new AtomicReference<>();
// just capture the outgoing PB
DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
doAnswer(new Answer<BlockReportResponseProto>() {
public BlockReportResponseProto answer(InvocationOnMock invocation) {
Object[] args = invocation.getArguments();
// args[0] is the RpcController; args[1] is the request proto we capture.
request.set((BlockReportRequestProto) args[1]);
return BlockReportResponseProto.newBuilder().build();
}
}).when(mockProxy).blockReport(any(RpcController.class), any(BlockReportRequestProto.class));
@SuppressWarnings("resource") DatanodeProtocolClientSideTranslatorPB nn = new DatanodeProtocolClientSideTranslatorPB(mockProxy);
DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
reg.setNamespaceInfo(nsInfo);
// A single finalized replica is enough to exercise the encoding paths.
Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
DatanodeStorage storage = new DatanodeStorage("s1");
StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
// check DN sends new-style BR
request.set(null);
nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
nn.blockReport(reg, "pool", sbr, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
BlockReportRequestProto proto = request.get();
assertNotNull(proto);
// New-style report: data is carried in buffers, not the longs list.
assertTrue(proto.getReports(0).getBlocksList().isEmpty());
assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
// back up to prior version and check DN sends old-style BR
request.set(null);
nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
BlockListAsLongs blockList = getBlockList(r);
StorageBlockReport[] obp = new StorageBlockReport[] { new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
nn.blockReport(reg, "pool", obp, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
proto = request.get();
assertNotNull(proto);
// Old-style report: the longs list is populated and buffers stay empty.
assertFalse(proto.getReports(0).getBlocksList().isEmpty());
assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
Use of org.apache.hadoop.hdfs.server.datanode.Replica in the Apache Hadoop project.
Class TestBlockListAsLongs, method checkReplicas.
/**
 * Asserts that the decoded block list contains exactly the expected
 * replicas — same ids, byte counts, generation stamps, and states —
 * with no extras and none missing.
 */
private void checkReplicas(Map<Long, Replica> expectedReplicas, BlockListAsLongs decodedBlocks) {
assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());
// Work on a copy so each decoded block can be ticked off as it is seen.
Map<Long, Replica> remaining = new HashMap<>(expectedReplicas);
for (BlockReportReplica decoded : decodedBlocks) {
assertNotNull(decoded);
Replica original = remaining.remove(decoded.getBlockId());
assertNotNull(original);
assertEquals("wrong bytes", original.getNumBytes(), decoded.getNumBytes());
assertEquals("wrong genstamp", original.getGenerationStamp(), decoded.getGenerationStamp());
assertEquals("wrong replica state", original.getState(), decoded.getState());
}
// Every expected replica must have been matched exactly once.
assertTrue(remaining.isEmpty());
}
Use of org.apache.hadoop.hdfs.server.datanode.Replica in the Apache Hadoop project.
Class TestBlockListAsLongs, method getBlockList.
/**
 * Builds a legacy longs-encoded block list from the given replicas.
 * Layout (as consumed by {@code BlockListAsLongs.decodeLongs}): a two-long
 * header — replica count, then 0 (presumably the under-construction count;
 * TODO confirm) — followed by (blockId, bytesOnDisk, genStamp) per replica.
 *
 * @param replicas replicas to encode
 * @return the decoded block list built from the legacy longs layout
 */
private BlockListAsLongs getBlockList(Replica... replicas) {
int numBlocks = replicas.length;
// Presize for the full payload: 2 header longs plus 3 longs per replica
// (the original hint of 2 + numBlocks under-counted and forced regrowth).
List<Long> longs = new ArrayList<Long>(2 + 3 * numBlocks);
longs.add(Long.valueOf(numBlocks));
longs.add(0L);
for (Replica r : replicas) {
longs.add(r.getBlockId());
longs.add(r.getBytesOnDisk());
longs.add(r.getGenerationStamp());
}
BlockListAsLongs blockList = BlockListAsLongs.decodeLongs(longs);
return blockList;
}
Aggregations