
Example 1 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class FsDatasetImplTestUtils, method getStoredReplicas:

@Override
public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
    // Reload replicas from the disk.
    ReplicaMap replicaMap = new ReplicaMap(dataset.datasetLock);
    try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
        for (FsVolumeSpi vol : refs) {
            FsVolumeImpl volume = (FsVolumeImpl) vol;
            volume.getVolumeMap(bpid, replicaMap, dataset.ramDiskReplicaTracker);
        }
    }
    // Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
    // FsVolumeSpi implementation.
    List<Replica> ret = new ArrayList<>();
    if (replicaMap.replicas(bpid) != null) {
        ret.addAll(replicaMap.replicas(bpid));
    }
    return ret.iterator();
}
Also used: FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), ArrayList (java.util.ArrayList), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica)
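A minimal usage sketch (hypothetical, not taken from the Hadoop sources): assuming `utils` is an FsDatasetImplTestUtils bound to a live dataset, a test can reload the on-disk replicas for a block pool and count the finalized ones. The variable names and the block pool id are illustrative.

// Hypothetical usage; `utils` and "BP-1" are illustrative names, and the
// caller must handle the IOException declared by getStoredReplicas.
Iterator<Replica> it = utils.getStoredReplicas("BP-1");
int finalized = 0;
while (it.hasNext()) {
    Replica r = it.next();
    // Count only replicas that have been finalized on disk.
    if (r.getState() == HdfsServerConstants.ReplicaState.FINALIZED) {
        finalized++;
    }
}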

Example 2 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class FsDatasetImpl, method getBlockLocalPathInfo:

// FsDatasetSpi
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
        if (replica == null) {
            throw new ReplicaNotFoundException(block);
        }
        if (replica.getGenerationStamp() < block.getGenerationStamp()) {
            throw new IOException("Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica);
        } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
            block.setGenerationStamp(replica.getGenerationStamp());
        }
    }
    ReplicaInfo r = getBlockReplica(block);
    File blockFile = new File(r.getBlockURI());
    File metaFile = new File(r.getMetadataURI());
    BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString());
    return info;
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), RamDiskReplica (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), File (java.io.File)
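A hedged caller sketch, not part of FsDatasetImpl itself: given a `dataset` like the one above and an ExtendedBlock `block` known to live on this datanode, the returned BlockLocalPathInfo exposes the block and meta file paths, which is what short-circuit local reads build on. Assumes the code sits in a method declared to throw IOException.

// Illustrative caller; `dataset` and `block` are assumed to exist.
BlockLocalPathInfo info = dataset.getBlockLocalPathInfo(block);
File blockFile = new File(info.getBlockPath());
File metaFile = new File(info.getMetaPath());
try (FileInputStream in = new FileInputStream(blockFile)) {
    // Read block bytes straight from the local filesystem instead of
    // going through the DataNode's streaming protocol.
    byte[] buf = new byte[4096];
    int n = in.read(buf);
}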

Example 3 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class TestBlockListAsLongs, method testDatanodeDetect:

@Test
public void testDatanodeDetect() throws ServiceException, IOException {
    final AtomicReference<BlockReportRequestProto> request = new AtomicReference<>();
    // just capture the outgoing PB
    DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
    doAnswer(new Answer<BlockReportResponseProto>() {

        @Override
        public BlockReportResponseProto answer(InvocationOnMock invocation) {
            Object[] args = invocation.getArguments();
            request.set((BlockReportRequestProto) args[1]);
            return BlockReportResponseProto.newBuilder().build();
        }
    }).when(mockProxy).blockReport(any(RpcController.class), any(BlockReportRequestProto.class));
    @SuppressWarnings("resource") DatanodeProtocolClientSideTranslatorPB nn = new DatanodeProtocolClientSideTranslatorPB(mockProxy);
    DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
    NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
    reg.setNamespaceInfo(nsInfo);
    Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
    BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
    DatanodeStorage storage = new DatanodeStorage("s1");
    StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
    // check DN sends new-style BR
    request.set(null);
    nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
    nn.blockReport(reg, "pool", sbr, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    BlockReportRequestProto proto = request.get();
    assertNotNull(proto);
    assertTrue(proto.getReports(0).getBlocksList().isEmpty());
    assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
    // back up to prior version and check DN sends old-style BR
    request.set(null);
    nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
    BlockListAsLongs blockList = getBlockList(r);
    StorageBlockReport[] obp = new StorageBlockReport[] { new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
    nn.blockReport(reg, "pool", obp, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    proto = request.get();
    assertNotNull(proto);
    assertFalse(proto.getReports(0).getBlocksList().isEmpty());
    assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
Also used: StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), AtomicReference (java.util.concurrent.atomic.AtomicReference), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), RpcController (com.google.protobuf.RpcController), DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), DatanodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), BlockReportRequestProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto), InvocationOnMock (org.mockito.invocation.InvocationOnMock), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockReportResponseProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto), NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo), Test (org.junit.Test)
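A follow-up sketch, not part of the original test: the captured new-style request can be decoded back with BlockListAsLongs.decodeBuffers to show that the replica round-trips. `newStyleProto` is a hypothetical name for the request captured after the first blockReport call; StorageBlockReportProto comes from DatanodeProtocolProtos.

// Hypothetical verification of the buffer-based encoding.
StorageBlockReportProto report = newStyleProto.getReports(0);
BlockListAsLongs decoded = BlockListAsLongs.decodeBuffers(
        (int) report.getNumberOfBlocks(), report.getBlocksBuffersList());
assertEquals(1, decoded.getNumberOfBlocks());
for (BlockReportReplica rep : decoded) {
    // The single replica was built from new Block(1, 2, 3) above.
    assertEquals(1, rep.getBlockId());
    assertEquals(2, rep.getNumBytes());
    assertEquals(3, rep.getGenerationStamp());
}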

Example 4 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class TestBlockListAsLongs, method checkReplicas:

private void checkReplicas(Map<Long, Replica> expectedReplicas, BlockListAsLongs decodedBlocks) {
    assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());
    // Copy the expected map so each matched replica can be removed as found.
    Map<Long, Replica> reportReplicas = new HashMap<>(expectedReplicas);
    for (BlockReportReplica replica : decodedBlocks) {
        assertNotNull(replica);
        Replica expected = reportReplicas.remove(replica.getBlockId());
        assertNotNull(expected);
        assertEquals("wrong bytes", expected.getNumBytes(), replica.getNumBytes());
        assertEquals("wrong genstamp", expected.getGenerationStamp(), replica.getGenerationStamp());
        assertEquals("wrong replica state", expected.getState(), replica.getState());
    }
    assertTrue(reportReplicas.isEmpty());
}
Also used: HashMap (java.util.HashMap), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica)
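An invocation sketch under stated assumptions: encode a pair of finalized replicas, then hand the expected map and the decoded list to checkReplicas. Block ids, lengths, and generation stamps are arbitrary illustrative values.

// Illustrative invocation of checkReplicas.
Map<Long, Replica> expected = new HashMap<>();
Replica r1 = new FinalizedReplica(new Block(1, 100, 1001), null, null);
Replica r2 = new FinalizedReplica(new Block(2, 200, 1002), null, null);
expected.put(r1.getBlockId(), r1);
expected.put(r2.getBlockId(), r2);
// Round-trip through the BlockListAsLongs encoding, then compare fields.
BlockListAsLongs decoded = BlockListAsLongs.encode(expected.values());
checkReplicas(expected, decoded);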

Example 5 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class TestBlockListAsLongs, method getBlockList:

private BlockListAsLongs getBlockList(Replica... replicas) {
    int numBlocks = replicas.length;
    List<Long> longs = new ArrayList<Long>(2 + numBlocks);
    longs.add(Long.valueOf(numBlocks)); // header: number of finalized blocks
    longs.add(0L); // header: number of under-construction blocks
    for (Replica r : replicas) {
        longs.add(r.getBlockId());
        longs.add(r.getBytesOnDisk());
        longs.add(r.getGenerationStamp());
    }
    BlockListAsLongs blockList = BlockListAsLongs.decodeLongs(longs);
    return blockList;
}
Also used: ArrayList (java.util.ArrayList), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica)
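The long array built here is the legacy block-report encoding: two header longs (the finalized-block count and the under-construction count, always 0 in this helper), followed by one (blockId, bytesOnDisk, generationStamp) triple per block. A small round-trip sketch with illustrative values:

// Round-trip one finalized replica through the legacy long encoding.
Replica r = new FinalizedReplica(new Block(42, 1024, 7), null, null);
BlockListAsLongs list = getBlockList(r);
assertEquals(1, list.getNumberOfBlocks());
BlockReportReplica decoded = list.iterator().next();
assertEquals(42, decoded.getBlockId());
assertEquals(1024, decoded.getNumBytes());
assertEquals(7, decoded.getGenerationStamp());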

Aggregations

Replica (org.apache.hadoop.hdfs.server.datanode.Replica): 9 usages
FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica): 7 usages
BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica): 5 usages
Test (org.junit.Test): 4 usages
ArrayList (java.util.ArrayList): 3 usages
IOException (java.io.IOException): 2 usages
HashMap (java.util.HashMap): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2 usages
ByteString (com.google.protobuf.ByteString): 1 usage
RpcController (com.google.protobuf.RpcController): 1 usage
File (java.io.File): 1 usage
Random (java.util.Random): 1 usage
AtomicReference (java.util.concurrent.atomic.AtomicReference): 1 usage
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1 usage
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 usage
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 usage
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 1 usage
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 usage