Search in sources :

Example 1 with ReplicaRecoveryInfo

use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testSafeLength.

@Test(timeout = 60000)
public void testSafeLength() throws Exception {
    // The expected safe lengths in blockLengthsSuite are tied to the first
    // system erasure-coding policy, so that policy is hard coded here.
    final ErasureCodingPolicy ecPolicy =
        ErasureCodingPolicyManager.getSystemPolicies()[0];
    final RecoveringStripedBlock stripedBlock =
        new RecoveringStripedBlock(rBlock, new byte[9], ecPolicy);
    final BlockRecoveryWorker worker = new BlockRecoveryWorker(dn);
    final BlockRecoveryWorker.RecoveryTaskStriped task =
        worker.new RecoveryTaskStriped(stripedBlock);
    // Each suite entry pairs a set of replica lengths with the safe length
    // the recovery task is expected to compute from them.
    for (int caseIdx = 0; caseIdx < blockLengthsSuite.length; caseIdx++) {
        final int[] replicaLengths = blockLengthsSuite[caseIdx][0];
        final int expectedSafeLength = blockLengthsSuite[caseIdx][1][0];
        final Map<Long, BlockRecord> syncList = new HashMap<>();
        for (int replicaId = 0; replicaId < replicaLengths.length; replicaId++) {
            final ReplicaRecoveryInfo info =
                new ReplicaRecoveryInfo(replicaId, replicaLengths[replicaId], 0, null);
            syncList.put((long) replicaId, new BlockRecord(null, null, info));
        }
        Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + caseIdx + "]",
            expectedSafeLength, task.getSafeLength(syncList));
    }
}
Also used: RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) HashMap(java.util.HashMap) ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) BlockRecord(org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Matchers.anyLong(org.mockito.Matchers.anyLong) Test(org.junit.Test)

Example 2 with ReplicaRecoveryInfo

use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testFinalizedReplicas.

/**
   * BlockRecovery_02.8.
   * Two replicas are in Finalized state.
   * Verifies that (a) two finalized replicas with equal lengths both get
   * updated to the recovery generation stamp, and (b) two finalized replicas
   * with different lengths cause recovery to fail with an IOException.
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testFinalizedReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    // Same length, different (older) generation stamps: both replicas
    // participate and both datanodes must be asked to update.
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.FINALIZED);
    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    // Two finalized replicas have different lengths: recovery must refuse
    // to sync them and surface the inconsistency to the caller.
    replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.FINALIZED);
    try {
        testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
        // Fixed typo in the failure message: "lengthes" -> "lengths".
        Assert.fail("Two finalized replicas should not have different lengths!");
    } catch (IOException e) {
        Assert.assertTrue(e.getMessage().startsWith("Inconsistent size of finalized replicas. "));
    }
}
Also used: ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) IOException(java.io.IOException) Test(org.junit.Test)

Example 3 with ReplicaRecoveryInfo

use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method initBlockRecords.

/**
 * Builds a one-element sync list for the test's shared {@code block},
 * pairing this datanode's registration with the given (typically spied)
 * datanode and a FINALIZED recovery info for the block.
 */
private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
    final DatanodeRegistration registration =
        dn.getDNRegistrationForBP(block.getBlockPoolId());
    final ReplicaRecoveryInfo recoveryInfo = new ReplicaRecoveryInfo(
        block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
        ReplicaState.FINALIZED);
    final List<BlockRecord> records = new ArrayList<>(1);
    records.add(new BlockRecord(new DatanodeID(registration), spyDN, recoveryInfo));
    return records;
}
Also used: DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) BlockRecord(org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord) ArrayList(java.util.ArrayList)

Example 4 with ReplicaRecoveryInfo

use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testRBW_RWRReplicas.

/**
   * BlockRecovery_02.12.
   * One replica is RBW and another is RWR: only the RBW replica is
   * expected to be synced; the RWR replica must be left untouched.
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testRBW_RWRReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    final ReplicaRecoveryInfo rbwReplica =
        new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
    final ReplicaRecoveryInfo rwrReplica =
        new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR);
    final InterDatanodeProtocol rbwNode = mock(InterDatanodeProtocol.class);
    final InterDatanodeProtocol rwrNode = mock(InterDatanodeProtocol.class);
    testSyncReplicas(rbwReplica, rwrReplica, rbwNode, rwrNode, REPLICA_LEN1);
    // Only the RBW holder participates in the sync.
    verify(rbwNode).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(rwrNode, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Also used: ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)

Example 5 with ReplicaRecoveryInfo

use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.

From the class TestBlockRecovery, method testRBWReplicas.

/**
   * BlockRecovery_02.11.
   * Two replicas are RBW: both must be truncated to the shorter
   * replica's length during sync.
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testRBWReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    final ReplicaRecoveryInfo shorterOrEqual =
        new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
    final ReplicaRecoveryInfo longerOrEqual =
        new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
    final InterDatanodeProtocol node1 = mock(InterDatanodeProtocol.class);
    final InterDatanodeProtocol node2 = mock(InterDatanodeProtocol.class);
    // With two RBW replicas the agreed-upon length is the minimum.
    final long expectedLength = Math.min(REPLICA_LEN1, REPLICA_LEN2);
    testSyncReplicas(shorterOrEqual, longerOrEqual, node1, node2, expectedLength);
    verify(node1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, expectedLength);
    verify(node2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, expectedLength);
}
Also used: ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)

Aggregations

ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo)15 Test (org.junit.Test)11 IOException (java.io.IOException)7 InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol)7 RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)6 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)3 ServiceException (com.google.protobuf.ServiceException)2 Block (org.apache.hadoop.hdfs.protocol.Block)2 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)2 BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord)2 ArrayList (java.util.ArrayList)1 HashMap (java.util.HashMap)1 Path (org.apache.hadoop.fs.Path)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)1 DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID)1 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)1 ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)1 RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException)1 BlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)1