Example 11 with FinalizedReplica

Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.

From the class TestFsDatasetImpl, method testDuplicateReplicaResolution:

@Test
public void testDuplicateReplicaResolution() throws IOException {
    FsVolumeImpl fsv1 = Mockito.mock(FsVolumeImpl.class);
    FsVolumeImpl fsv2 = Mockito.mock(FsVolumeImpl.class);
    File f1 = new File("d1/block");
    File f2 = new File("d2/block");
    // FinalizedReplica(blockId, numBytes, genStamp, volume, dir):
    // replicas of block 1 on the first volume, varying in length and genstamp
    ReplicaInfo replicaOlder = new FinalizedReplica(1, 1, 1, fsv1, f1);
    ReplicaInfo replica = new FinalizedReplica(1, 2, 2, fsv1, f1);
    ReplicaInfo replicaSame = new FinalizedReplica(1, 2, 2, fsv1, f1);
    ReplicaInfo replicaNewer = new FinalizedReplica(1, 3, 3, fsv1, f1);
    // the same block id duplicated on a second volume
    ReplicaInfo replicaOtherOlder = new FinalizedReplica(1, 1, 1, fsv2, f2);
    ReplicaInfo replicaOtherSame = new FinalizedReplica(1, 2, 2, fsv2, f2);
    ReplicaInfo replicaOtherNewer = new FinalizedReplica(1, 3, 3, fsv2, f2);
    // equivalent path, so don't remove either
    assertNull(BlockPoolSlice.selectReplicaToDelete(replicaSame, replica));
    assertNull(BlockPoolSlice.selectReplicaToDelete(replicaOlder, replica));
    assertNull(BlockPoolSlice.selectReplicaToDelete(replicaNewer, replica));
    // different paths: keep the latest found replica, delete the stale one
    assertSame(replica,
        BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica));
    assertSame(replicaOtherOlder,
        BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica));
    assertSame(replica,
        BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica));
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), File (java.io.File), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Test (org.junit.Test)
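
To make the assertions above easier to follow, here is a minimal, self-contained sketch of the resolution rule this test exercises. It is not the Hadoop implementation: ReplicaStub and DuplicateReplicaPolicy below are hypothetical names standing in for ReplicaInfo and BlockPoolSlice.selectReplicaToDelete, and the real BlockPoolSlice method does more than this comparison (it works on ReplicaInfo objects and is also responsible for cleaning up the losing replica's files).

import java.util.Objects;

// Hypothetical stand-in holding only the fields the resolution policy compares.
final class ReplicaStub {
    final long blockId;
    final long numBytes;
    final long genStamp;
    final String path;   // where the block file lives on disk

    ReplicaStub(long blockId, long numBytes, long genStamp, String path) {
        this.blockId = blockId;
        this.numBytes = numBytes;
        this.genStamp = genStamp;
        this.path = path;
    }
}

final class DuplicateReplicaPolicy {
    /**
     * Returns the replica to delete, or null when both records point at the
     * same on-disk file (one physical copy seen twice, so nothing is removed).
     */
    static ReplicaStub selectReplicaToDelete(ReplicaStub found, ReplicaStub existing) {
        if (Objects.equals(found.path, existing.path)) {
            return null;                                  // same file, keep it
        }
        ReplicaStub toKeep;
        if (found.genStamp != existing.genStamp) {
            // prefer the replica with the newer generation stamp
            toKeep = found.genStamp > existing.genStamp ? found : existing;
        } else if (found.numBytes != existing.numBytes) {
            // same genstamp: prefer the longer replica
            toKeep = found.numBytes > existing.numBytes ? found : existing;
        } else {
            // full tie: keep the replica just found during the directory scan
            toKeep = found;
        }
        return toKeep == found ? existing : found;
    }
}

Under these assumptions the sketch reproduces the outcomes asserted in the test: two records that resolve to the same path are never candidates for deletion, while genuine duplicates on different volumes lose first on generation stamp, then on length, and on a full tie the copy found most recently is kept.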

Aggregations

FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) : 11
Test (org.junit.Test) : 7
ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten) : 3
File (java.io.File) : 2
Block (org.apache.hadoop.hdfs.protocol.Block) : 2
BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica) : 2
Replica (org.apache.hadoop.hdfs.server.datanode.Replica) : 2
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) : 2
ReplicaWaitingToBeRecovered (org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered) : 2
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) : 2
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext) : 2
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) : 2
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) : 2
RpcController (com.google.protobuf.RpcController) : 1
FileInputStream (java.io.FileInputStream) : 1
RandomAccessFile (java.io.RandomAccessFile) : 1
ArrayList (java.util.ArrayList) : 1
Random (java.util.Random) : 1
AtomicReference (java.util.concurrent.atomic.AtomicReference) : 1
FileExistsException (org.apache.commons.io.FileExistsException) : 1