Search in sources :

Example 1 with ReplicaUnderRecovery

use of org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery in project hadoop by apache.

From the class FsDatasetImplTestUtils, method createReplicaUnderRecovery:

/**
 * Creates a replica in the under-recovery state for the given block and
 * registers it in the dataset's replica map.
 *
 * The replica is placed on the first available volume, wrapping a finalized
 * replica with the supplied recovery id.
 */
@Override
public Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId) throws IOException {
    // Hold volume references for the duration of the operation so the
    // volume cannot be removed out from under us.
    try (FsVolumeReferences volumeRefs = dataset.getFsVolumeReferences()) {
        FsVolumeImpl firstVolume = (FsVolumeImpl) volumeRefs.get(0);
        FinalizedReplica finalized = new FinalizedReplica(
                block.getLocalBlock(), firstVolume, firstVolume.getCurrentDir().getParentFile());
        ReplicaUnderRecovery recovering = new ReplicaUnderRecovery(finalized, recoveryId);
        dataset.volumeMap.add(block.getBlockPoolId(), recovering);
        return recovering;
    }
}
Also used : FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) ReplicaUnderRecovery(org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica)

Example 2 with ReplicaUnderRecovery

use of org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery in project hadoop by apache.

From the class TestInterDatanodeProtocol, method testInitReplicaRecovery:

/** Test 
   * {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long, long)}
   */
@Test
public void testInitReplicaRecovery() throws IOException {
    final long firstblockid = 10000L;
    final long gs = 7777L;
    final long length = 22L;
    // Hoist the xceiver-stop timeout, which was repeated at every call site.
    final long xceiverStopTimeout = DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT;
    final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
    String bpid = "BP-TEST";
    // Populate the map with five consecutive replicas sharing the same
    // generation stamp and length.
    final Block[] blocks = new Block[5];
    for (int i = 0; i < blocks.length; i++) {
        blocks[i] = new Block(firstblockid + i, length, gs);
        map.add(bpid, createReplicaInfo(blocks[i]));
    }
    {
        // Normal case: the first recovery converts the replica to a
        // ReplicaUnderRecovery carrying the requested recovery id.
        final Block b = blocks[0];
        final ReplicaInfo originalInfo = map.get(bpid, b);
        final long recoveryid = gs + 1;
        final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(bpid, map, blocks[0], recoveryid, xceiverStopTimeout);
        assertEquals(originalInfo, recoveryInfo);
        final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery) map.get(bpid, b);
        Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
        Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
        // Recover one more time with a newer recovery id: the existing
        // under-recovery replica must be updated in place.
        final long recoveryid2 = gs + 2;
        final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(bpid, map, blocks[0], recoveryid2, xceiverStopTimeout);
        assertEquals(originalInfo, recoveryInfo2);
        final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery) map.get(bpid, b);
        Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
        Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
        // Case RecoveryInProgressException: retrying with the now-stale
        // (smaller) recovery id must be rejected.
        try {
            FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid, xceiverStopTimeout);
            Assert.fail();
        } catch (RecoveryInProgressException ripe) {
            System.out.println("GOOD: getting " + ripe);
        }
    }
    {
        // BlockRecoveryFI_01: replica not found -> initReplicaRecovery
        // returns null rather than throwing.
        final long recoveryid = gs + 1;
        final Block b = new Block(firstblockid - 1, length, gs);
        ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid, xceiverStopTimeout);
        Assert.assertNull("Data-node should not have this replica.", r);
    }
    {
        // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs  
        final long recoveryid = gs - 1;
        final Block b = new Block(firstblockid + 1, length, gs);
        try {
            FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid, xceiverStopTimeout);
            Assert.fail();
        } catch (IOException ioe) {
            System.out.println("GOOD: getting " + ioe);
        }
    }
    // BlockRecoveryFI_03: Replica's gs is less than the block's gs
    {
        final long recoveryid = gs + 1;
        final Block b = new Block(firstblockid, length, gs + 1);
        try {
            FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid, xceiverStopTimeout);
            fail("InitReplicaRecovery should fail because replica's " + "gs is less than the block's gs");
        } catch (IOException e) {
            // Fix: the boolean result of startsWith() was previously
            // discarded, making this message check a silent no-op.
            Assert.assertTrue(e.getMessage(), e.getMessage().startsWith(
                "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
        }
    }
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException) RecoveryInProgressException(org.apache.hadoop.hdfs.protocol.RecoveryInProgressException) ReplicaUnderRecovery(org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery) Test(org.junit.Test)

Aggregations

ReplicaUnderRecovery (org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery)2 IOException (java.io.IOException)1 Block (org.apache.hadoop.hdfs.protocol.Block)1 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)1 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)1 RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException)1 FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica)1 ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo)1 FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences)1 RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)1 ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo)1 AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)1 Test (org.junit.Test)1