Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.
From class SimulatedFSDataset, method initReplicaRecovery.
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  ExtendedBlock b = rBlock.getBlock();
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b);
  }
  return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
      binfo.getGenerationStamp(),
      binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
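For orientation, a minimal caller sketch, assuming a dataset handle and placeholder pool id, block id, and generation stamps (none of these values come from the snippet above):

// Hypothetical sketch: "dataset" is an FsDatasetSpi<?> obtained elsewhere;
// "BP-TEST", the block id, and the generation stamps are placeholders.
static void startRecovery(FsDatasetSpi<?> dataset) throws IOException {
  ExtendedBlock blk = new ExtendedBlock("BP-TEST", 12345L, 0L, 7L);
  RecoveringBlock rb = new RecoveringBlock(blk, null, 8L); // new GS for recovery
  ReplicaRecoveryInfo info = dataset.initReplicaRecovery(rb);
  // Only a FINALIZED replica reports a length the recovery can fully trust.
  if (info.getOriginalReplicaState() == ReplicaState.FINALIZED) {
    System.out.println("finalized length: " + info.getNumBytes());
  }
}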
Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.
From class TestBlockRecovery, method testRWRReplicas.
/**
 * BlockRecovery_02.13.
 * Two replicas are RWR.
 * @throws IOException in case of an error
 */
@Test(timeout = 60000)
public void testRWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RWR);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
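The rule being exercised: when every surviving replica is RWR (replica waiting to be recovered), no reported length can be trusted, so recovery truncates all replicas to the shortest one. A small sketch restating that rule; this helper is illustrative, not Hadoop API:

// Sketch: the sync length for an all-RWR recovery is the minimum of
// the lengths the datanodes report in their ReplicaRecoveryInfo.
static long rwrSyncLength(ReplicaRecoveryInfo... replicas) {
  long min = Long.MAX_VALUE;
  for (ReplicaRecoveryInfo r : replicas) {
    min = Math.min(min, r.getNumBytes());
  }
  return min;
}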
Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.
From class TestBlockRecovery, method testFinalizedRbwReplicas.
/**
 * BlockRecovery_02.9.
 * One replica is Finalized and another is RBW.
 * @throws IOException in case of an error
 */
@Test(timeout = 60000)
public void testFinalizedRbwReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // rbw and finalized replicas have the same length
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RBW);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  // rbw replica has a different length from the finalized one
  replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1,
      ReplicaState.FINALIZED);
  replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2,
      ReplicaState.RBW);
  dn1 = mock(InterDatanodeProtocol.class);
  dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
}
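The two passes encode a finalized-wins rule: a FINALIZED replica fixes the recovered length, and an RBW replica is synced only if its length matches, hence the never() verification in the second pass. A hedged restatement of that decision; this is a sketch, not the actual BlockRecoveryWorker code:

// Sketch: whether an RBW replica may join a sync that includes a
// FINALIZED replica of length finalizedLen.
static boolean rbwParticipates(ReplicaRecoveryInfo rbw, long finalizedLen) {
  return rbw.getOriginalReplicaState() == ReplicaState.RBW
      && rbw.getNumBytes() == finalizedLen;
}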
Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.
From class TestBlockRecovery, method testZeroLenReplicas.
/**
 * BlockRecoveryFI_07. Max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 * @throws InterruptedException if recovery is interrupted
 */
@Test(timeout = 60000)
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED))
      .when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  for (RecoveringBlock rBlock : initRecoveringBlocks()) {
    BlockRecoveryWorker.RecoveryTaskContiguous task =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(task);
    spyTask.recover();
  }
  DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(block, RECOVERY_ID, 0, true, true,
      DatanodeID.EMPTY_ARRAY, null);
}
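The doReturn(...).when(spyDN) form is deliberate: with a Mockito spy, when(spyDN.initReplicaRecovery(...)) would invoke the real method during stubbing, whereas doReturn stubs without calling it. A minimal illustration with a plain java.util.List, outside Hadoop:

// Stubbing a spy: doReturn(...) never invokes the real method, while
// when(...) would, so the commented line below would throw
// IndexOutOfBoundsException during stubbing.
List<String> spyList = spy(new ArrayList<String>());
// when(spyList.get(0)).thenReturn("x");  // calls the real get(0): throws
doReturn("x").when(spyList).get(0);       // safe: real get(0) is not called
assertEquals("x", spyList.get(0));

In the final verification, the two boolean arguments correspond to closeFile and deleteBlock in DatanodeProtocol#commitBlockSynchronization, so a recovery that finds only zero-length replicas closes and deletes the block at the NameNode.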
Use of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo in project hadoop by apache.
From class TestInterDatanodeProtocol, method testInitReplicaRecovery.
/**
 * Test
 * {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long, long)}.
 */
@Test
public void testInitReplicaRecovery() throws IOException {
  final long firstblockid = 10000L;
  final long gs = 7777L;
  final long length = 22L;
  final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
  String bpid = "BP-TEST";
  final Block[] blocks = new Block[5];
  for (int i = 0; i < blocks.length; i++) {
    blocks[i] = new Block(firstblockid + i, length, gs);
    map.add(bpid, createReplicaInfo(blocks[i]));
  }
  {
    // normal case
    final Block b = blocks[0];
    final ReplicaInfo originalInfo = map.get(bpid, b);
    final long recoveryid = gs + 1;
    final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo);
    final ReplicaUnderRecovery updatedInfo =
        (ReplicaUnderRecovery) map.get(bpid, b);
    assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
    assertEquals(recoveryid, updatedInfo.getRecoveryID());
    // recover one more time
    final long recoveryid2 = gs + 2;
    final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid2,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo2);
    final ReplicaUnderRecovery updatedInfo2 =
        (ReplicaUnderRecovery) map.get(bpid, b);
    assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
    assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
    // case RecoveryInProgressException
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail();
    } catch (RecoveryInProgressException ripe) {
      System.out.println("GOOD: getting " + ripe);
    }
  }
  {
    // BlockRecoveryFI_01: replica not found
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid - 1, length, gs);
    ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
        recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertNull("Data-node should not have this replica.", r);
  }
  {
    // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs
    final long recoveryid = gs - 1;
    final Block b = new Block(firstblockid + 1, length, gs);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail();
    } catch (IOException ioe) {
      System.out.println("GOOD: getting " + ioe);
    }
  }
  // BlockRecoveryFI_03: replica's gs is less than the block's gs
  {
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid, length, gs + 1);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail("InitReplicaRecovery should fail because replica's "
          + "gs is less than the block's gs");
    } catch (IOException e) {
      // The failure message must identify the generation-stamp mismatch.
      assertTrue(e.getMessage().startsWith(
          "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
    }
  }
}
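The snippet depends on a createReplicaInfo helper that the aggregation does not show. A minimal sketch, assuming a FinalizedReplica with no backing volume or directory is acceptable because the test never touches disk:

// Hedged sketch of the omitted helper: wrap the Block in a FinalizedReplica.
// The null volume and directory are an assumption that holds only for
// in-memory ReplicaMap tests like this one.
private static ReplicaInfo createReplicaInfo(Block b) {
  return new FinalizedReplica(b, null, null);
}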