Usage of org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord in the Apache Hadoop project: class TestBlockRecovery, method testSafeLength.
/**
 * Verifies that {@code RecoveryTaskStriped#getSafeLength} produces the
 * expected safe length for every entry of the hard-coded
 * {@code blockLengthsSuite} table: each case supplies per-replica block
 * lengths and the single expected safe-length value.
 */
@Test(timeout = 60000)
public void testSafeLength() throws Exception {
  // Hard-coded policy to match the hard-coded length suite.
  final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemPolicies()[0];
  final RecoveringStripedBlock stripedBlock =
      new RecoveringStripedBlock(rBlock, new byte[9], ecPolicy);
  final BlockRecoveryWorker worker = new BlockRecoveryWorker(dn);
  final BlockRecoveryWorker.RecoveryTaskStriped task =
      worker.new RecoveryTaskStriped(stripedBlock);

  for (int caseIdx = 0; caseIdx < blockLengthsSuite.length; caseIdx++) {
    final int[] lengths = blockLengthsSuite[caseIdx][0];
    final int expectedSafeLength = blockLengthsSuite[caseIdx][1][0];

    // One BlockRecord per replica, keyed by a synthetic block id.
    final Map<Long, BlockRecord> syncList = new HashMap<>();
    long id = 0;
    for (final int length : lengths) {
      final ReplicaRecoveryInfo info =
          new ReplicaRecoveryInfo(id, length, 0, null);
      syncList.put(id, new BlockRecord(null, null, info));
      id++;
    }

    Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + caseIdx + "]",
        expectedSafeLength, task.getSafeLength(syncList));
  }
}
Usage of org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord in the Apache Hadoop project: class TestBlockRecovery, method initBlockRecords.
/**
 * Builds a single-element list containing one {@link BlockRecord} for the
 * test's {@code block} field, wrapping the given (possibly spied) DataNode
 * as the record's datanode proxy. The replica is reported as FINALIZED.
 */
private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
  final DatanodeRegistration registration =
      dn.getDNRegistrationForBP(block.getBlockPoolId());
  final ReplicaRecoveryInfo recoveryInfo = new ReplicaRecoveryInfo(
      block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
      ReplicaState.FINALIZED);

  final List<BlockRecord> records = new ArrayList<>(1);
  records.add(new BlockRecord(new DatanodeID(registration), spyDN,
      recoveryInfo));
  return records;
}
Usage of org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord in the Apache Hadoop project: class TestBlockRecovery, method testSyncReplicas.
/**
 * Drives {@code RecoveryTaskContiguous#syncBlock} over two replicas whose
 * inter-datanode proxies are Mockito mocks; each mock is stubbed to report
 * a distinct storage ID from {@code updateReplicaUnderRecovery}.
 */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
    ReplicaRecoveryInfo replica2, InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2, long expectLen) throws IOException {
  final DatanodeInfo[] locs = new DatanodeInfo[] {
      mock(DatanodeInfo.class), mock(DatanodeInfo.class) };
  final RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);

  // Same bogus address for both records; only the proxies differ.
  final ArrayList<BlockRecord> syncList = new ArrayList<>(2);
  syncList.add(new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1));
  syncList.add(new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2));

  when(dn1.updateReplicaUnderRecovery(
      (ExtendedBlock) anyObject(), anyLong(), anyLong(), anyLong()))
      .thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery(
      (ExtendedBlock) anyObject(), anyLong(), anyLong(), anyLong()))
      .thenReturn("storage2");

  final BlockRecoveryWorker.RecoveryTaskContiguous task =
      recoveryWorker.new RecoveryTaskContiguous(rBlock);
  task.syncBlock(syncList);
}
Aggregations