Usage example of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in the Apache Hadoop project.
Source: class TestFsDatasetImpl, method testDuplicateReplicaResolution.
/**
 * Verifies {@code BlockPoolSlice.selectReplicaToDelete} when duplicate
 * replicas of the same block are discovered: replicas resolving to the
 * same file are never deleted, otherwise the replica with the lower
 * generation stamp is selected for deletion (ties keep the first,
 * i.e. most recently found, argument).
 */
@Test
public void testDuplicateReplicaResolution() throws IOException {
  // Two mocked volumes, each holding a copy of block id 1 at a distinct path.
  FsVolumeImpl volume1 = Mockito.mock(FsVolumeImpl.class);
  FsVolumeImpl volume2 = Mockito.mock(FsVolumeImpl.class);
  File blockFileOnVol1 = new File("d1/block");
  File blockFileOnVol2 = new File("d2/block");

  // Replicas on volume1 at generation stamps 1, 2 (twice), and 3.
  ReplicaInfo replicaOlder = new FinalizedReplica(1, 1, 1, volume1, blockFileOnVol1);
  ReplicaInfo replica = new FinalizedReplica(1, 2, 2, volume1, blockFileOnVol1);
  ReplicaInfo replicaSame = new FinalizedReplica(1, 2, 2, volume1, blockFileOnVol1);
  ReplicaInfo replicaNewer = new FinalizedReplica(1, 3, 3, volume1, blockFileOnVol1);

  // Replicas on volume2 (different file path) at the same generation stamps.
  ReplicaInfo replicaOtherOlder = new FinalizedReplica(1, 1, 1, volume2, blockFileOnVol2);
  ReplicaInfo replicaOtherSame = new FinalizedReplica(1, 2, 2, volume2, blockFileOnVol2);
  ReplicaInfo replicaOtherNewer = new FinalizedReplica(1, 3, 3, volume2, blockFileOnVol2);

  // Both replicas resolve to the same file path: neither should be removed.
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaSame, replica));
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaOlder, replica));
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaNewer, replica));

  // Distinct paths: the lower-genstamp replica is chosen for deletion.
  // On a genstamp tie the first argument is kept, so 'replica' is deleted.
  assertSame(replica, BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica));
  assertSame(replicaOtherOlder, BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica));
  assertSame(replica, BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica));
}
Aggregations