Use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
In class TestReplicationSourceManager, method testCleanupFailoverQueues:
@Test
public void testCleanupFailoverQueues() throws Exception {
  Server server = new DummyServer("hostname1.example.org");
  ReplicationQueueStorage rq = ReplicationStorageFactory
    .getReplicationQueueStorage(server.getZooKeeper(), server.getConfiguration());
  // populate some znodes in the peer znode
  SortedSet<String> files = new TreeSet<>();
  String group = "testgroup";
  String file1 = group + "." + EnvironmentEdgeManager.currentTime() + ".log1";
  String file2 = group + "." + EnvironmentEdgeManager.currentTime() + ".log2";
  files.add(file1);
  files.add(file2);
  for (String file : files) {
    rq.addWAL(server.getServerName(), "1", file);
  }
  Server s1 = new DummyServer("dummyserver1.example.org");
  ReplicationPeers rp1 =
    ReplicationFactory.getReplicationPeers(s1.getZooKeeper(), s1.getConfiguration());
  rp1.init();
  manager.claimQueue(server.getServerName(), "1");
  assertEquals(1, manager.getWalsByIdRecoveredQueues().size());
  String id = "1-" + server.getServerName().getServerName();
  assertEquals(files, manager.getWalsByIdRecoveredQueues().get(id).get(group));
  ReplicationSourceInterface source = mock(ReplicationSourceInterface.class);
  when(source.getQueueId()).thenReturn(id);
  when(source.isRecovered()).thenReturn(true);
  when(source.isSyncReplication()).thenReturn(false);
  manager.cleanOldLogs(file2, false, source);
  // log1 should be deleted
  assertEquals(Sets.newHashSet(file2), manager.getWalsByIdRecoveredQueues().get(id).get(group));
}
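
The id asserted above follows the recovered-queue naming convention, "<peerId>-<deadServerName>", which ReplicationQueueInfo can parse back apart. A minimal round-trip sketch (the host name, port, and timestamp are illustrative):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;

public class RecoveredQueueIdSketch {
  public static void main(String[] args) {
    // illustrative dead server; any ServerName works here
    ServerName dead = ServerName.valueOf("hostname1.example.org", 16020, 1524679704418L);
    // a recovered queue id is the original peer id plus the dead server's name
    String recoveredQueueId = "1-" + dead.getServerName();
    ReplicationQueueInfo info = new ReplicationQueueInfo(recoveredQueueId);
    System.out.println(info.getPeerId());            // "1"
    System.out.println(info.isQueueRecovered());     // true
    System.out.println(info.getDeadRegionServers()); // contains 'dead'
  }
}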
Use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
In class TestReplicationSourceManager, method testClaimQueues:
@Test
public void testClaimQueues() throws Exception {
  Server server = new DummyServer("hostname0.example.org");
  ReplicationQueueStorage rq = ReplicationStorageFactory
    .getReplicationQueueStorage(server.getZooKeeper(), server.getConfiguration());
  // populate some znodes in the peer znode ('files' is a collection field on the test class)
  files.add("log1");
  files.add("log2");
  for (String file : files) {
    rq.addWAL(server.getServerName(), "1", file);
  }
  // create 3 DummyServers
  Server s1 = new DummyServer("dummyserver1.example.org");
  Server s2 = new DummyServer("dummyserver2.example.org");
  Server s3 = new DummyServer("dummyserver3.example.org");
  // create 3 DummyNodeFailoverWorkers
  DummyNodeFailoverWorker w1 = new DummyNodeFailoverWorker(server.getServerName(), s1);
  DummyNodeFailoverWorker w2 = new DummyNodeFailoverWorker(server.getServerName(), s2);
  DummyNodeFailoverWorker w3 = new DummyNodeFailoverWorker(server.getServerName(), s3);
  // 'latch' is a CountDownLatch field on the test class
  latch = new CountDownLatch(3);
  // start the threads
  w1.start();
  w2.start();
  w3.start();
  // make sure only one worker claimed the queue successfully
  int populatedMap = 0;
  // wait until all the workers are done
  latch.await();
  populatedMap += w1.isLogZnodesMapPopulated() + w2.isLogZnodesMapPopulated()
    + w3.isLogZnodesMapPopulated();
  assertEquals(1, populatedMap);
  server.abort("", null);
}
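
The three workers race to claim the same dead server's queue, and the test asserts that exactly one wins. A stripped-down sketch of that coordination pattern, using an AtomicInteger in place of the real ZooKeeper-backed claim (all names here are ours, not HBase's):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class ClaimRaceSketch {
  public static void main(String[] args) throws InterruptedException {
    AtomicInteger claims = new AtomicInteger(); // stands in for the atomic znode claim
    CountDownLatch done = new CountDownLatch(3);
    for (int i = 0; i < 3; i++) {
      new Thread(() -> {
        // compareAndSet mimics the claim: exactly one thread can move 0 -> 1
        if (claims.compareAndSet(0, 1)) {
          // the winning worker would populate its log-znodes map here
        }
        done.countDown(); // mirrors latch.countDown() in the dummy worker
      }).start();
    }
    done.await(); // mirrors latch.await() in the test
    System.out.println("winners = " + claims.get()); // prints 1, like assertEquals(1, populatedMap)
  }
}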
Use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
In class TestReplicationSourceManagerZkImpl, method testNodeFailoverDeadServerParsing:
// Tests the naming convention of adopted queues for ReplicationQueuesZkImpl
@Test
public void testNodeFailoverDeadServerParsing() throws Exception {
  Server server = new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com");
  ReplicationQueueStorage queueStorage = ReplicationStorageFactory
    .getReplicationQueueStorage(server.getZooKeeper(), conf);
  // populate some znodes in the peer znode ('files' and 'conf' are fields on the test class)
  files.add("log1");
  files.add("log2");
  for (String file : files) {
    queueStorage.addWAL(server.getServerName(), "1", file);
  }
  // create 3 DummyServers
  Server s1 = new DummyServer("ip-10-8-101-114.ec2.internal");
  Server s2 = new DummyServer("ec2-107-20-52-47.compute-1.amazonaws.com");
  Server s3 = new DummyServer("ec2-23-20-187-167.compute-1.amazonaws.com");
  // simulate three servers failing one after another
  ServerName serverName = server.getServerName();
  List<String> unclaimed = queueStorage.getAllQueues(serverName);
  queueStorage.claimQueue(serverName, unclaimed.get(0), s1.getServerName());
  queueStorage.removeReplicatorIfQueueIsEmpty(serverName);
  serverName = s1.getServerName();
  unclaimed = queueStorage.getAllQueues(serverName);
  queueStorage.claimQueue(serverName, unclaimed.get(0), s2.getServerName());
  queueStorage.removeReplicatorIfQueueIsEmpty(serverName);
  serverName = s2.getServerName();
  unclaimed = queueStorage.getAllQueues(serverName);
  String queue3 =
    queueStorage.claimQueue(serverName, unclaimed.get(0), s3.getServerName()).getFirst();
  queueStorage.removeReplicatorIfQueueIsEmpty(serverName);
  ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(queue3);
  List<ServerName> result = replicationQueueInfo.getDeadRegionServers();
  // verify that all three dead servers are encoded in the adopted queue's id
  assertTrue(result.contains(server.getServerName()));
  assertTrue(result.contains(s1.getServerName()));
  assertTrue(result.contains(s2.getServerName()));
  server.stop("");
}
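
Each failover above repeats the same three calls. Against the same ReplicationQueueStorage API, the step could be factored into a helper like the sketch below (the class and method names are ours, not HBase's):

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;

final class FailoverSketch {
  /** Claim every queue of a dead server for a live one, then drop the empty replicator node. */
  static void claimAllQueues(ReplicationQueueStorage storage, ServerName dead, ServerName live)
      throws ReplicationException {
    List<String> unclaimed = storage.getAllQueues(dead);
    for (String queueId : unclaimed) {
      // claimQueue returns a Pair of (queue id as adopted by the live server, WAL set)
      storage.claimQueue(dead, queueId, live);
    }
    storage.removeReplicatorIfQueueIsEmpty(dead);
  }
}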
Use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
In class TestReplicationSource, method testRecoveredReplicationSourceShipperGetPosition:
/**
 * Test HBASE-20497.
 * Moved here from TestReplicationSource because it doesn't need a cluster.
 */
@Test
public void testRecoveredReplicationSourceShipperGetPosition() throws Exception {
  String walGroupId = "fake-wal-group-id";
  ServerName serverName = ServerName.valueOf("www.example.com", 12006, 1524679704418L);
  ServerName deadServer = ServerName.valueOf("www.deadServer.com", 12006, 1524679704419L);
  RecoveredReplicationSource source = mock(RecoveredReplicationSource.class);
  Server server = mock(Server.class);
  Mockito.when(server.getServerName()).thenReturn(serverName);
  Mockito.when(source.getServer()).thenReturn(server);
  Mockito.when(source.getServerWALsBelongTo()).thenReturn(deadServer);
  ReplicationQueueStorage storage = mock(ReplicationQueueStorage.class);
  Mockito.when(storage.getWALPosition(Mockito.eq(serverName), Mockito.any(), Mockito.any()))
    .thenReturn(1001L);
  Mockito.when(storage.getWALPosition(Mockito.eq(deadServer), Mockito.any(), Mockito.any()))
    .thenReturn(-1L);
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.setInt("replication.source.maxretriesmultiplier", -1);
  MetricsSource metricsSource = mock(MetricsSource.class);
  doNothing().when(metricsSource).incrSizeOfLogQueue();
  ReplicationSourceLogQueue logQueue = new ReplicationSourceLogQueue(conf, metricsSource, source);
  logQueue.enqueueLog(new Path("/www/html/test"), walGroupId);
  RecoveredReplicationSourceShipper shipper =
    new RecoveredReplicationSourceShipper(conf, walGroupId, logQueue, source, storage);
  assertEquals(1001L, shipper.getStartPosition());
}
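
The two getWALPosition stubs encode the behavior under test: the shipper takes the position recorded under the current server (1001 here) rather than relying on the dead server's record (-1 here). An illustrative reconstruction of that lookup order, not the actual shipper code, might read:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;

final class StartPositionSketch {
  // illustrative only: mirrors the lookup order the mocks above are set up to verify
  static long getStartPosition(ReplicationQueueStorage storage, ServerName currentServer,
      ServerName deadServer, String queueId, String walName) throws ReplicationException {
    // prefer the position recorded under the server that claimed the queue ...
    long position = storage.getWALPosition(currentServer, queueId, walName);
    if (position < 0) {
      // ... and fall back to the position recorded under the dead server
      position = storage.getWALPosition(deadServer, queueId, walName);
    }
    return Math.max(position, 0); // -1 everywhere means start from the beginning of the WAL
  }
}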
Use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
In class TestHBaseFsckReplication, method test:
@Test
public void test() throws Exception {
  ReplicationPeerStorage peerStorage = ReplicationStorageFactory
    .getReplicationPeerStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration());
  ReplicationQueueStorage queueStorage = ReplicationStorageFactory
    .getReplicationQueueStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration());
  String peerId1 = "1";
  String peerId2 = "2";
  peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
    true, SyncReplicationState.NONE);
  peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
    true, SyncReplicationState.NONE);
  for (int i = 0; i < 10; i++) {
    queueStorage.addWAL(ServerName.valueOf("localhost", 10000 + i, 100000 + i), peerId1,
      "file-" + i);
  }
  queueStorage.addWAL(ServerName.valueOf("localhost", 10000, 100000), peerId2, "file");
  HBaseFsck fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
  HbckTestingUtil.assertNoErrors(fsck);
  // should not remove anything since the replication peer is still alive
  assertEquals(10, queueStorage.getListOfReplicators().size());
  peerStorage.removePeer(peerId1);
  // there should now be orphan queues
  assertEquals(10, queueStorage.getListOfReplicators().size());
  fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), false);
  HbckTestingUtil.assertErrors(fsck,
    Stream.generate(() -> ERROR_CODE.UNDELETED_REPLICATION_QUEUE).limit(10)
      .toArray(ERROR_CODE[]::new));
  // should not delete anything when fix is false
  assertEquals(10, queueStorage.getListOfReplicators().size());
  fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
  HbckTestingUtil.assertErrors(fsck,
    Stream.generate(() -> ERROR_CODE.UNDELETED_REPLICATION_QUEUE).limit(10)
      .toArray(ERROR_CODE[]::new));
  List<ServerName> replicators = queueStorage.getListOfReplicators();
  // only the server with a queue for peerId2 should remain
  assertEquals(1, replicators.size());
  assertEquals(ServerName.valueOf("localhost", 10000, 100000), replicators.get(0));
  for (String queueId : queueStorage.getAllQueues(replicators.get(0))) {
    assertEquals(peerId2, queueId);
  }
}
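
What hbck flags here reduces to a scan: any queue whose peer id no longer exists in peer storage is orphaned. A hedged sketch of that detection pass, using only the storage APIs that appear above plus ReplicationPeerStorage.listPeerIds() (the class and method names are ours):

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;

final class OrphanQueueScanSketch {
  /** Print every queue whose peer has been removed, the condition hbck reports above. */
  static void findOrphans(ReplicationQueueStorage queues, ReplicationPeerStorage peers)
      throws ReplicationException {
    List<String> peerIds = peers.listPeerIds();
    for (ServerName replicator : queues.getListOfReplicators()) {
      for (String queueId : queues.getAllQueues(replicator)) {
        String peerId = new ReplicationQueueInfo(queueId).getPeerId();
        if (!peerIds.contains(peerId)) {
          System.out.println("orphan queue " + queueId + " on " + replicator);
        }
      }
    }
  }
}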