Example usage of org.apache.hadoop.hbase.replication.ReplicationPeers in the Apache HBase project, taken from the class TestReplicationSourceManager, method testPeerRemovalCleanup.
/**
 * Test whether calling removePeer() on a ReplicationSourceManager that failed on initializing the
 * corresponding ReplicationSourceInterface correctly cleans up the corresponding
 * replication queue and ReplicationPeer.
 * See HBASE-16096.
 * @throws Exception on ZooKeeper or replication-setup failure
 */
@Test
public void testPeerRemovalCleanup() throws Exception {
// Remember the original source implementation so it can be restored in the finally block,
// since 'conf' is shared with other tests in this class.
String replicationSourceImplName = conf.get("replication.replicationsource.implementation");
try {
DummyServer server = new DummyServer();
final ReplicationQueues rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, server.getZooKeeper()));
rq.init(server.getServerName().toString());
// Purposely fail ReplicationSourceManager.addSource() by causing ReplicationSourceInterface
// initialization to throw an exception.
conf.set("replication.replicationsource.implementation", FailInitializeDummyReplicationSource.class.getName());
final ReplicationPeers rp = manager.getReplicationPeers();
// Set up the znode and ReplicationPeer for the fake peer
rp.registerPeer("FakePeer", new ReplicationPeerConfig().setClusterKey("localhost:1:/hbase"));
// Wait for the peer to get created and connected
// (registration is observed asynchronously via the ReplicationTracker, hence the Waiter).
Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return (rp.getConnectedPeer("FakePeer") != null);
}
});
// Make sure that the replication source was not initialized
// (addSource() should have failed because of the failing implementation set above).
List<ReplicationSourceInterface> sources = manager.getSources();
for (ReplicationSourceInterface source : sources) {
assertNotEquals("FakePeer", source.getPeerClusterId());
}
// Create a replication queue for the fake peer
rq.addLog("FakePeer", "FakeFile");
// Unregister peer, this should remove the peer and clear all queues associated with it
// Need to wait for the ReplicationTracker to pick up the changes and notify listeners.
rp.unregisterPeer("FakePeer");
// All three conditions must hold: queue removed, connected peer dropped, peer id gone.
Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
List<String> peers = rp.getAllPeerIds();
return (!rq.getAllQueues().contains("FakePeer")) && (rp.getConnectedPeer("FakePeer") == null) && (!peers.contains("FakePeer"));
}
});
} finally {
// Restore the original source implementation so later tests see an unmodified conf.
conf.set("replication.replicationsource.implementation", replicationSourceImplName);
}
}
Example usage of org.apache.hadoop.hbase.replication.ReplicationPeers in the Apache HBase project, taken from the class VerifyReplication, method getPeerQuorumConfig.
/**
 * Connects to the local ZooKeeper quorum and resolves the replication peer
 * configuration for the configured {@code peerId}.
 *
 * @param conf configuration used to locate the local ZooKeeper quorum
 * @return pair of the peer's {@link ReplicationPeerConfig} and the peer cluster's
 *         {@link Configuration}; never null
 * @throws IOException if the peer configuration cannot be found or a
 *         {@link ReplicationException} occurs while reading it
 */
private static Pair<ReplicationPeerConfig, Configuration> getPeerQuorumConfig(final Configuration conf) throws IOException {
    ZooKeeperWatcher localZKW = null;
    try {
        // No-op Abortable: this is a short-lived, read-only ZooKeeper session,
        // so there is nothing meaningful to do on abort.
        localZKW = new ZooKeeperWatcher(conf, "VerifyReplication", new Abortable() {
            @Override
            public void abort(String why, Throwable e) {
            }

            @Override
            public boolean isAborted() {
                return false;
            }
        });
        ReplicationPeers rp = ReplicationFactory.getReplicationPeers(localZKW, conf, localZKW);
        rp.init();
        Pair<ReplicationPeerConfig, Configuration> pair = rp.getPeerConf(peerId);
        if (pair == null) {
            throw new IOException("Couldn't get peer conf!");
        }
        return pair;
    } catch (ReplicationException e) {
        // Wrap with context and preserve the cause. Message typo fixed: "remove" -> "remote".
        throw new IOException("An error occurred while trying to connect to the remote peer cluster", e);
    } finally {
        // Removed the dead local 'ReplicationPeerZKImpl peer': it was never assigned,
        // so its close() branch in this finally block was unreachable.
        if (localZKW != null) {
            localZKW.close();
        }
    }
}
Example usage of org.apache.hadoop.hbase.replication.ReplicationPeers in the Apache HBase project, taken from the class TestReplicationSourceManager, method testCleanupFailoverQueues.
/**
 * Test that a NodeFailoverWorker claims a dead server's replication queue and
 * that cleanOldLogs() then trims already-replicated WALs from the recovered queue.
 * @throws Exception on ZooKeeper or replication-setup failure
 */
@Test
public void testCleanupFailoverQueues() throws Exception {
    final Server server = new DummyServer("hostname1.example.org");
    ReplicationQueues rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, server.getZooKeeper()));
    rq.init(server.getServerName().toString());
    // populate some znodes in the peer znode: two WALs of the same log group for peer "1"
    SortedSet<String> files = new TreeSet<>();
    String group = "testgroup";
    String file1 = group + ".log1";
    String file2 = group + ".log2";
    files.add(file1);
    files.add(file2);
    for (String file : files) {
        rq.addLog("1", file);
    }
    // A second (surviving) server that will take over the first server's queue.
    Server s1 = new DummyServer("dummyserver1.example.org");
    ReplicationQueues rq1 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1, s1.getZooKeeper()));
    rq1.init(s1.getServerName().toString());
    ReplicationPeers rp1 = ReplicationFactory.getReplicationPeers(s1.getZooKeeper(), s1.getConfiguration(), s1);
    rp1.init();
    // Use primitive long literals; the boxing constructor new Long(long) is deprecated
    // and the values were immediately unboxed anyway.
    NodeFailoverWorker w1 = manager.new NodeFailoverWorker(server.getServerName().getServerName(), rq1, rp1, new UUID(1L, 2L));
    w1.run();
    // Exactly one recovered queue should exist, keyed by "<peerId>-<deadServerName>".
    assertEquals(1, manager.getWalsByIdRecoveredQueues().size());
    String id = "1-" + server.getServerName().getServerName();
    assertEquals(files, manager.getWalsByIdRecoveredQueues().get(id).get(group));
    manager.cleanOldLogs(file2, id, true);
    // log1 should be deleted; only the latest WAL (file2) remains in the recovered queue
    assertEquals(Sets.newHashSet(file2), manager.getWalsByIdRecoveredQueues().get(id).get(group));
}
Example usage of org.apache.hadoop.hbase.replication.ReplicationPeers in the Apache HBase project, taken from the class DumpReplicationQueues, method dumpQueues.
/**
 * Walks every replication queue registered in ZooKeeper and renders a human-readable
 * report. Queues whose peer id is not in {@code peerIds} are recorded as deleted/orphaned.
 *
 * @param connection cluster connection used to resolve replication peers
 * @param zkw ZooKeeper watcher for the cluster being inspected
 * @param peerIds ids of currently-configured peers; queues for other ids are flagged
 * @param hdfs whether to include HDFS WAL size details in the output
 * @return formatted multi-line report of all queues
 * @throws Exception on ZooKeeper or replication access failure
 */
public String dumpQueues(ClusterConnection connection, ZooKeeperWatcher zkw, Set<String> peerIds, boolean hdfs) throws Exception {
ReplicationQueuesClient queuesClient;
ReplicationPeers replicationPeers;
ReplicationQueues replicationQueues;
ReplicationTracker replicationTracker;
ReplicationQueuesClientArguments replicationArgs = new ReplicationQueuesClientArguments(getConf(), new WarnOnlyAbortable(), zkw);
StringBuilder sb = new StringBuilder();
queuesClient = ReplicationFactory.getReplicationQueuesClient(replicationArgs);
queuesClient.init();
replicationQueues = ReplicationFactory.getReplicationQueues(replicationArgs);
replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), queuesClient, connection);
replicationTracker = ReplicationFactory.getReplicationTracker(zkw, replicationPeers, getConf(), new WarnOnlyAbortable(), new WarnOnlyStoppable());
// Snapshot of currently live region servers; anything else with a queue is considered dead.
List<String> liveRegionServers = replicationTracker.getListOfRegionServers();
// Loops each peer on each RS and dumps the queues
try {
List<String> regionservers = queuesClient.getListOfReplicators();
for (String regionserver : regionservers) {
List<String> queueIds = queuesClient.getAllQueues(regionserver);
// Re-point replicationQueues at this RS so formatQueue reads the right znodes.
replicationQueues.init(regionserver);
if (!liveRegionServers.contains(regionserver)) {
deadRegionServers.add(regionserver);
}
for (String queueId : queueIds) {
ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
List<String> wals = queuesClient.getLogsInQueue(regionserver, queueId);
// A queue for a peer that no longer exists is orphaned: record it and
// format it with the 'deleted' flag set.
if (!peerIds.contains(queueInfo.getPeerId())) {
deletedQueues.add(regionserver + "/" + queueId);
sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, true, hdfs));
} else {
sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, false, hdfs));
}
}
}
} catch (KeeperException ke) {
// Surface ZooKeeper failures as IOException, preserving the cause.
throw new IOException(ke);
}
return sb.toString();
}
Aggregations