
Example 6 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method initBlockRecords.

private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
    List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
    BlockRecord blockRecord = new BlockRecord(new DatanodeID(dnR), spyDN, new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(), ReplicaState.FINALIZED));
    blocks.add(blockRecord);
    return blocks;
}
Also used: DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord), ArrayList (java.util.ArrayList)
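
In the enclosing TestBlockRecovery class, a list built by initBlockRecords is typically fed straight into the contiguous recovery task. A minimal sketch of such a caller, assuming the recoveryWorker field and an rBlock built as in Example 9 below, and assuming the package-private syncBlock(List<BlockRecord>) is visible to the test (the helper name recoverWithInitBlockRecords is made up for this illustration):

private void recoverWithInitBlockRecords(DataNode spyDN, RecoveringBlock rBlock) throws IOException {
    BlockRecoveryWorker.RecoveryTaskContiguous task =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    // syncBlock drives updateReplicaUnderRecovery on the datanode held by each BlockRecord.
    task.syncBlock(initBlockRecords(spyDN));
}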

Example 7 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testRBW_RWRReplicas.

/**
   * BlockRecovery_02.12.
   * One replica is RBW and another is RWR. 
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testRBW_RWRReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR);
    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Also used: ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), Test (org.junit.Test)
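
This test (and Examples 8 and 10 below) delegates to a private testSyncReplicas helper that this page does not reproduce. A rough sketch of what such a helper might look like, inferred from the calls shown; the mocked DatanodeID/DatanodeInfo instances, the stubbed "storage1"/"storage2" return values, and the recoveryWorker, block and RECOVERY_ID fields are assumptions, not code from this page:

private void testSyncReplicas(ReplicaRecoveryInfo replica1, ReplicaRecoveryInfo replica2,
        InterDatanodeProtocol dn1, InterDatanodeProtocol dn2, long expectLen) throws IOException {
    // Two mocked locations; RECOVERY_ID plays the role of the new generation stamp.
    DatanodeInfo[] locs = new DatanodeInfo[] { mock(DatanodeInfo.class), mock(DatanodeInfo.class) };
    RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
    List<BlockRecord> syncList = new ArrayList<>(2);
    syncList.add(new BlockRecord(mock(DatanodeID.class), dn1, replica1));
    syncList.add(new BlockRecord(mock(DatanodeID.class), dn2, replica2));
    // Stub the inter-datanode calls so syncBlock gets a storage ID back.
    when(dn1.updateReplicaUnderRecovery(any(ExtendedBlock.class), anyLong(), anyLong(), anyLong()))
        .thenReturn("storage1");
    when(dn2.updateReplicaUnderRecovery(any(ExtendedBlock.class), anyLong(), anyLong(), anyLong()))
        .thenReturn("storage2");
    // Run the contiguous recovery task; expectLen is the length the participating
    // replicas should be synced to, which the callers check with verify(...).
    BlockRecoveryWorker.RecoveryTaskContiguous task =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    task.syncBlock(syncList);
}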

Example 8 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testRBWReplicas.

/**
   * BlockRecovery_02.11.
   * Two replicas are RBW.
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testRBWReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
    testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
Also used: ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), Test (org.junit.Test)
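
A quick numeric illustration of the rule this test verifies; the concrete lengths below are made up, and the real REPLICA_LEN1 and REPLICA_LEN2 constants are defined elsewhere in TestBlockRecovery:

// Hypothetical lengths, only to make the min() rule concrete.
long len1 = 1024L;
long len2 = 2048L;
// With two RBW replicas, recovery cannot trust bytes beyond the shortest replica,
// so both datanodes are asked to sync to the minimum length (1024 here).
long minLen = Math.min(len1, len2);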

Example 9 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testRURReplicas.

/**
   * DNs report RUR instead of RBW, RWR or FINALIZED. Primary DN expected to
   * throw an exception.
   * @throws Exception
   */
@Test(timeout = 60000)
public void testRURReplicas() throws Exception {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    doReturn(new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(), ReplicaState.RUR)).when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
    boolean exceptionThrown = false;
    try {
        for (RecoveringBlock rBlock : initRecoveringBlocks()) {
            BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous = recoveryWorker.new RecoveryTaskContiguous(rBlock);
            BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(RecoveryTaskContiguous);
            spyTask.recover();
        }
    } catch (IOException e) {
        // expect IOException to be thrown here
        e.printStackTrace();
        assertTrue("Wrong exception was thrown: " + e.getMessage(), e.getMessage().contains("Found 1 replica(s) for block " + block + " but none is in RWR or better state"));
        exceptionThrown = true;
    } finally {
        assertTrue(exceptionThrown);
    }
}
Also used: ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), IOException (java.io.IOException), Test (org.junit.Test)
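
initRecoveringBlocks is another private helper that this page does not show. A minimal sketch, under the assumption that it builds a single RecoveringBlock located on the local (spied) datanode plus one mocked peer; constructing DatanodeInfo directly from the registration is also an assumption and may require DatanodeInfo.DatanodeInfoBuilder in newer Hadoop versions:

private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
    Collection<RecoveringBlock> blocks = new ArrayList<>(1);
    // First location is the local datanode, so the stubbed initReplicaRecovery is hit;
    // the second location is just a mocked peer.
    DatanodeInfo[] locs = new DatanodeInfo[] {
        new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
        mock(DatanodeInfo.class) };
    blocks.add(new RecoveringBlock(block, locs, RECOVERY_ID));
    return blocks;
}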

Example 10 with ReplicaRecoveryInfo

Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testFinalizedRwrReplicas.

/**
   * BlockRecovery_02.10.
   * One replica is Finalized and another is RWR. 
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testFinalizedRwrReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    // rbw and finalized replicas have the same length
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR);
    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    // rbw replica has a different length from the finalized one
    replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
    dn1 = mock(InterDatanodeProtocol.class);
    dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Also used: ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), Test (org.junit.Test)
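
A compact way to read the two sub-cases above: once a FINALIZED replica exists, its length wins, and a non-finalized replica only takes part in recovery if it is RBW with exactly that length. The snippet below is only an illustration of that rule, not the actual BlockRecoveryWorker logic:

// replica1 is FINALIZED in both sub-cases above.
long finalizedLen = replica1.getNumBytes();
// An RWR replica never participates once a finalized copy exists; an RBW replica
// participates only when its length matches the finalized length. In both sub-cases
// the second replica is excluded, which is why dn2 is verified with never().
boolean replica2Participates =
    replica2.getOriginalReplicaState() == ReplicaState.RBW
        && replica2.getNumBytes() == finalizedLen;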

Aggregations

ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo): 15
Test (org.junit.Test): 11
IOException (java.io.IOException): 7
InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol): 7
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
ServiceException (com.google.protobuf.ServiceException): 2
Block (org.apache.hadoop.hdfs.protocol.Block): 2
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2
BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord): 2
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
Path (org.apache.hadoop.fs.Path): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1
ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 1
RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException): 1
BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto): 1