Use of org.apache.hadoop.hbase.replication.ReplicationQueueInfo in project hbase by apache.
From the class TestReplicationSourceManagerZkImpl, method testNodeFailoverDeadServerParsing.
// Tests the naming convention of adopted queues for ReplicationQueuesZkImpl
@Test
public void testNodeFailoverDeadServerParsing() throws Exception {
  Server server = new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com");
  ReplicationQueueStorage queueStorage =
    ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf);
  // populate some znodes in the peer znode
  files.add("log1");
  files.add("log2");
  for (String file : files) {
    queueStorage.addWAL(server.getServerName(), "1", file);
  }

  // create 3 DummyServers
  Server s1 = new DummyServer("ip-10-8-101-114.ec2.internal");
  Server s2 = new DummyServer("ec2-107-20-52-47.compute-1.amazonaws.com");
  Server s3 = new DummyServer("ec2-23-20-187-167.compute-1.amazonaws.com");

  // simulate three servers failing sequentially; each successor claims the
  // previous owner's queue
  ServerName serverName = server.getServerName();
  List<String> unclaimed = queueStorage.getAllQueues(serverName);
  queueStorage.claimQueue(serverName, unclaimed.get(0), s1.getServerName());
  queueStorage.removeReplicatorIfQueueIsEmpty(serverName);

  serverName = s1.getServerName();
  unclaimed = queueStorage.getAllQueues(serverName);
  queueStorage.claimQueue(serverName, unclaimed.get(0), s2.getServerName());
  queueStorage.removeReplicatorIfQueueIsEmpty(serverName);

  serverName = s2.getServerName();
  unclaimed = queueStorage.getAllQueues(serverName);
  String queue3 =
    queueStorage.claimQueue(serverName, unclaimed.get(0), s3.getServerName()).getFirst();
  queueStorage.removeReplicatorIfQueueIsEmpty(serverName);

  ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(queue3);
  List<ServerName> result = replicationQueueInfo.getDeadRegionServers();

  // verify that every previous owner is reported as a dead region server
  assertTrue(result.contains(server.getServerName()));
  assertTrue(result.contains(s1.getServerName()));
  assertTrue(result.contains(s2.getServerName()));

  server.stop("");
}
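
For reference, a minimal sketch of the adopted-queue naming convention this test exercises: after each claim, the queue id is the peer id followed by the ServerName of every dead previous owner, and ReplicationQueueInfo parses those names back out. The queue id, hostnames, and start codes below are hypothetical examples, not values from the test above.

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;

// Hypothetical claimed queue id: peer id "1" plus two dead ServerNames
// ("host,port,startcode"), joined by '-'.
String claimedQueueId =
  "1-host1.example.org,16020,1555555555555-host2.example.org,16020,1555555555556";
ReplicationQueueInfo info = new ReplicationQueueInfo(claimedQueueId);
info.getPeerId();                                     // "1"
info.isQueueRecovered();                              // true: the id encodes dead servers
List<ServerName> dead = info.getDeadRegionServers();  // the two ServerNames above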