Example 11 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

In the class SimulatedFSDataset, method initReplicaRecovery:

@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock) throws IOException {
    ExtendedBlock b = rBlock.getBlock();
    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
    BInfo binfo = map.get(b.getLocalBlock());
    if (binfo == null) {
        throw new IOException("No such Block " + b);
    }
    return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
        binfo.getGenerationStamp(),
        binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
Also used : ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) IOException(java.io.IOException)
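
The method above follows a lookup-or-fail pattern: resolve the per-block-pool map, fetch the replica's bookkeeping entry, and throw if the block is unknown. Below is a minimal, self-contained sketch of that pattern; RecoverySketch, BlockKey, and ReplicaSnapshot are illustrative names, not Hadoop APIs.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class RecoverySketch {
    // Illustrative key/value types, not part of Hadoop.
    record BlockKey(String blockPoolId, long blockId) {}
    record ReplicaSnapshot(long blockId, long bytesOnDisk, long genStamp, boolean finalized) {}

    private final Map<BlockKey, ReplicaSnapshot> replicas = new HashMap<>();

    // Mirrors the pattern above: look up the replica for the block pool,
    // fail loudly if it is unknown, otherwise return a recovery snapshot.
    ReplicaSnapshot initReplicaRecovery(String bpid, long blockId) throws IOException {
        ReplicaSnapshot info = replicas.get(new BlockKey(bpid, blockId));
        if (info == null) {
            throw new IOException("No such block " + blockId + " in pool " + bpid);
        }
        return info;
    }
}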

Example 12 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

In the class TestBlockRecovery, method testRWRReplicas:

/**
   * BlockRecovery_02.13. 
   * Two replicas are RWR.
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testRWRReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RWR);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RWR);
    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
    testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
Also used : ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)
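
The verifications above encode the rule under test: with two RWR (replica-waiting-to-be-recovered) replicas, both datanodes are told to synchronize to the shorter replica. A minimal sketch of that length rule, with an illustrative class name and values:

public class RwrLengthRule {
    static long recoveryLength(long len1, long len2) {
        // Both datanodes are then asked to update their replica to this
        // length, which is what the two verify(...) calls in the test check.
        return Math.min(len1, len2);
    }

    public static void main(String[] args) {
        System.out.println(recoveryLength(1024L, 512L)); // prints 512
    }
}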

Example 13 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

In the class TestBlockRecovery, method testFinalizedRbwReplicas:

/**
   * BlockRecovery_02.9.
   * One replica is Finalized and another is RBW. 
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testFinalizedRbwReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    // rbw and finalized replicas have the same length
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RBW);
    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    // rbw replica has a different length from the finalized one
    replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
    dn1 = mock(InterDatanodeProtocol.class);
    dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Also used : ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)
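
The two halves of this test suggest the underlying rule: an RBW replica participates in the sync only when its length matches the finalized replica's, otherwise it is left out (hence the never() verification). A hedged, self-contained sketch with illustrative names:

import java.util.List;

public class FinalizedRbwRule {
    record Replica(String datanode, long length, boolean finalized) {}

    // An RBW replica is kept only when its length equals the finalized
    // replica's length; otherwise the finalized replica alone is synced.
    static List<Replica> participants(Replica finalizedReplica, Replica rbwReplica) {
        if (rbwReplica.length() == finalizedReplica.length()) {
            return List.of(finalizedReplica, rbwReplica);
        }
        return List.of(finalizedReplica);
    }
}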

Example 14 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

In the class TestBlockRecovery, method testZeroLenReplicas:

/**
   * BlockRecoveryFI_07. max replica length from all DNs is zero.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testZeroLenReplicas() throws IOException, InterruptedException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
        block.getGenerationStamp(), ReplicaState.FINALIZED))
        .when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
    for (RecoveringBlock rBlock : initRecoveringBlocks()) {
        BlockRecoveryWorker.RecoveryTaskContiguous task =
            recoveryWorker.new RecoveryTaskContiguous(rBlock);
        BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(task);
        spyTask.recover();
    }
    DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
    verify(dnP).commitBlockSynchronization(block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Also used : ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) DatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol) Test(org.junit.Test)
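
The verified call pins down the zero-length outcome: the block is committed with length 0, and the two boolean arguments (close the file, delete the block) discard it. A small illustrative sketch of that decision; the Commit record and the non-zero branch are assumptions added for contrast, not Hadoop code:

public class ZeroLenOutcome {
    // Illustrative holder mirroring the arguments the test verifies:
    // newLength, closeFile, deleteBlock.
    record Commit(long newLength, boolean closeFile, boolean deleteBlock) {}

    static Commit decide(long maxReplicaLength) {
        if (maxReplicaLength == 0) {
            // Matches verify(dnP).commitBlockSynchronization(block,
            // RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null).
            return new Commit(0, true, true);
        }
        // Assumed non-zero case: commit the recovered length and keep the
        // block (this branch is not exercised by the test above).
        return new Commit(maxReplicaLength, true, false);
    }
}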

Example 15 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

In the class TestInterDatanodeProtocol, method testInitReplicaRecovery:

/** Test 
   * {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long, long)}
   */
@Test
public void testInitReplicaRecovery() throws IOException {
    final long firstblockid = 10000L;
    final long gs = 7777L;
    final long length = 22L;
    final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
    String bpid = "BP-TEST";
    final Block[] blocks = new Block[5];
    for (int i = 0; i < blocks.length; i++) {
        blocks[i] = new Block(firstblockid + i, length, gs);
        map.add(bpid, createReplicaInfo(blocks[i]));
    }
    {
        //normal case
        final Block b = blocks[0];
        final ReplicaInfo originalInfo = map.get(bpid, b);
        final long recoveryid = gs + 1;
        final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
            bpid, map, blocks[0], recoveryid,
            DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
        assertEquals(originalInfo, recoveryInfo);
        final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery) map.get(bpid, b);
        Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
        Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
        //recover one more time 
        final long recoveryid2 = gs + 2;
        final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(
            bpid, map, blocks[0], recoveryid2,
            DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
        assertEquals(originalInfo, recoveryInfo2);
        final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery) map.get(bpid, b);
        Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
        Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
        //case RecoveryInProgressException
        try {
            FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
                DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
            Assert.fail();
        } catch (RecoveryInProgressException ripe) {
            System.out.println("GOOD: getting " + ripe);
        }
    }
    {
        // BlockRecoveryFI_01: replica not found
        final long recoveryid = gs + 1;
        final Block b = new Block(firstblockid - 1, length, gs);
        ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
            recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
        Assert.assertNull("Data-node should not have this replica.", r);
    }
    {
        // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs  
        final long recoveryid = gs - 1;
        final Block b = new Block(firstblockid + 1, length, gs);
        try {
            FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
                DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
            Assert.fail();
        } catch (IOException ioe) {
            System.out.println("GOOD: getting " + ioe);
        }
    }
    // BlockRecoveryFI_03: Replica's gs is less than the block's gs
    {
        final long recoveryid = gs + 1;
        final Block b = new Block(firstblockid, length, gs + 1);
        try {
            FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
                DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
            fail("initReplicaRecovery should fail because the replica's "
                + "gs is less than the block's gs");
        } catch (IOException e) {
            // Verify the failure is due to the generation-stamp mismatch.
            Assert.assertTrue(e.getMessage().startsWith(
                "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
        }
    }
    }
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException) RecoveryInProgressException(org.apache.hadoop.hdfs.protocol.RecoveryInProgressException) ReplicaUnderRecovery(org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery) Test(org.junit.Test)
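
Taken together, the four cases outline the recovery-id invariants: the id must exceed the replica's generation stamp, must strictly increase across recovery attempts (a stale id raises RecoveryInProgressException), and an unknown block yields null. A self-contained sketch of the first two invariants, with illustrative names rather than FsDatasetImpl's:

import java.io.IOException;

public class RecoveryIdRules {
    private final long genStamp;
    private long currentRecoveryId = Long.MIN_VALUE;

    RecoveryIdRules(long genStamp) { this.genStamp = genStamp; }

    void initRecovery(long recoveryId) throws IOException {
        if (recoveryId <= genStamp) {
            // BlockRecoveryFI_02: the recovery id must be newer than the
            // replica's generation stamp.
            throw new IOException("recovery id " + recoveryId
                + " <= generation stamp " + genStamp);
        }
        if (recoveryId <= currentRecoveryId) {
            // Stands in for RecoveryInProgressException: a stale or repeated
            // recovery id while a newer recovery is already under way.
            throw new IOException("recovery " + currentRecoveryId
                + " already in progress");
        }
        currentRecoveryId = recoveryId; // replica is now "under recovery"
    }
}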

Aggregations

ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo): 15 usages
Test (org.junit.Test): 11 usages
IOException (java.io.IOException): 7 usages
InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol): 7 usages
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3 usages
ServiceException (com.google.protobuf.ServiceException): 2 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 2 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 usages
BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord): 2 usages
ArrayList (java.util.ArrayList): 1 usage
HashMap (java.util.HashMap): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 usage
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 usage
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 1 usage
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 usage
ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 1 usage
RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException): 1 usage
BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto): 1 usage