Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
In class TestMetaRegionLocationCache, method testMetaRegionLocationCache:
/**
 * Tests MetaRegionLocationCache's init procedure to make sure that it correctly watches the base
 * znode for notifications.
 */
@Test
public void testMetaRegionLocationCache() throws Exception {
  final String parentZnodeName = "/randomznodename";
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parentZnodeName);
  ServerName sn = ServerName.valueOf("localhost", 1234, 5678);
  try (ZKWatcher zkWatcher = new ZKWatcher(conf, null, null, true)) {
    // A thread that repeatedly creates and drops an unrelated child znode. This is to simulate
    // some ZK activity in the background.
    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
    ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
      @Override
      public void doAnAction() throws Exception {
        final String testZnode = parentZnodeName + "/child";
        ZKUtil.createNodeIfNotExistsAndWatch(zkWatcher, testZnode, testZnode.getBytes());
        ZKUtil.deleteNode(zkWatcher, testZnode);
      }
    });
    ctx.startThreads();
    try {
      MetaRegionLocationCache metaCache = new MetaRegionLocationCache(zkWatcher);
      // meta znodes do not exist at this point, cache should be empty.
      assertTrue(metaCache.getMetaRegionLocations().isEmpty());
      // Set the meta locations for a few replicas, simulating an active master performing meta
      // assignment.
      for (int i = 0; i < 3; i++) {
        // Updates the meta znodes.
        MetaTableLocator.setMetaLocation(zkWatcher, sn, i, RegionState.State.OPEN);
      }
      // Wait until the meta cache is populated.
      int iters = 0;
      while (iters++ < 10) {
        if (metaCache.getMetaRegionLocations().size() == 3) {
          break;
        }
        Thread.sleep(1000);
      }
      List<HRegionLocation> metaLocations = metaCache.getMetaRegionLocations();
      assertEquals(3, metaLocations.size());
      for (HRegionLocation location : metaLocations) {
        assertEquals(sn, location.getServerName());
      }
    } finally {
      // clean up.
      ctx.stop();
      ZKUtil.deleteChildrenRecursively(zkWatcher, parentZnodeName);
    }
  }
}
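The test drives the cache purely through ZooKeeper writes and then polls it. For orientation, the watcher/cache pairing it relies on can be sketched outside a test as below; this is only a sketch, the method name readMetaLocations and the "meta-cache-example" identifier are illustrative, and the cache fills in asynchronously as ZooKeeper notifications arrive:

static List<HRegionLocation> readMetaLocations(Configuration conf) throws Exception {
  // The watcher owns the ZooKeeper connection; the final 'true' lets it create the base znode.
  try (ZKWatcher watcher = new ZKWatcher(conf, "meta-cache-example", null, true)) {
    // The cache watches the meta-region znodes via the supplied ZKWatcher.
    MetaRegionLocationCache cache = new MetaRegionLocationCache(watcher);
    // May still be empty right after construction; entries appear as ZK events are processed.
    return cache.getMetaRegionLocations();
  }
}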
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
In class TestMetaWithReplicasBasic, method testZookeeperNodesForReplicas:
@Test
public void testZookeeperNodesForReplicas() throws Exception {
  // Checks all the znodes exist when meta's replicas are enabled
  ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  Configuration conf = TEST_UTIL.getConfiguration();
  String baseZNode =
    conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode,
    conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ProtobufUtil.toServerName(data);
  for (int i = 1; i < 3; i++) {
    String secZnode = ZNodePaths.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + i);
    String str = zkw.getZNodePaths().getZNodeForReplica(i);
    assertTrue(str.equals(secZnode));
    // check that the data in the znode is parseable (this would also mean the znode exists)
    data = ZKUtil.getData(zkw, secZnode);
    ProtobufUtil.toServerName(data);
  }
}
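For comparison, the per-replica lookup the loop performs can be condensed into a small helper. This is only a sketch: the helper name metaServerForReplica is not from HBase, and it assumes an already-connected ZKWatcher.

static ServerName metaServerForReplica(ZKWatcher zkw, int replicaId) throws Exception {
  // Replica 0 resolves to the primary meta-region-server znode; other replicas get a "-<id>" suffix.
  String znode = zkw.getZNodePaths().getZNodeForReplica(replicaId);
  // The znode stores a protobuf-serialized ServerName; parsing it also verifies the data shape.
  byte[] data = ZKUtil.getData(zkw, znode);
  return ProtobufUtil.toServerName(data);
}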
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
In class TestMasterCoprocessorExceptionWithAbort, method testExceptionFromCoprocessorWhenCreatingTable:
@Test
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
  SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class);
  assertFalse("No table created yet", cp.wasCreateTableCalled());
  // set a watch on the zookeeper /hbase/master node. If the master dies,
  // the node will be deleted.
  ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException("Fatal ZK error: " + why, e);
    }
    @Override
    public boolean isAborted() {
      return false;
    }
  });
  MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
throw new RuntimeException("Fatal ZK master tracker error, why=", e);
    }
    @Override
    public boolean isAborted() {
      return false;
    }
  });
  masterTracker.start();
  zkw.registerListener(masterTracker);
  // Test (part of the) output that should have been printed by the master when it aborts
  // (namely the part that shows the set of loaded coprocessors).
  // In this test, there is only a single coprocessor (BuggyMasterObserver).
  assertTrue(HMaster.getLoadedCoprocessors()
    .contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
  CreateTableThread createTableThread = new CreateTableThread(UTIL);
  // Attempting to create a table (using createTableThread above) triggers an NPE in
  // BuggyMasterObserver. The master will then abort and the /hbase/master zk node will be deleted.
  createTableThread.start();
  // Wait up to 30 seconds for the master's /hbase/master zk node to go away after the master aborts.
  for (int i = 0; i < 30; i++) {
    if (masterTracker.masterZKNodeWasDeleted) {
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      fail("InterruptedException while waiting for master zk node to be deleted.");
    }
  }
assertTrue("Master aborted on coprocessor exception, as expected.", masterTracker.masterZKNodeWasDeleted);
createTableThread.interrupt();
try {
createTableThread.join(1000);
} catch (InterruptedException e) {
assertTrue("Ignoring InterruptedException while waiting for " + " createTableThread.join().", true);
}
}
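MasterTracker above is a test-local helper; the essential mechanism is a ZKListener whose nodeDeleted callback flips a flag when the watched znode disappears. A rough sketch of such a listener follows (the class and field names are illustrative, not the test's actual MasterTracker):

class NodeDeletionTracker extends ZKListener {
  private final String nodePath;
  volatile boolean nodeWasDeleted = false;

  NodeDeletionTracker(ZKWatcher watcher, String nodePath) {
    super(watcher);
    this.nodePath = nodePath;
  }

  @Override
  public void nodeDeleted(String path) {
    // The watcher invokes this for every deletion event; only react to the node we care about.
    if (nodePath.equals(path)) {
      nodeWasDeleted = true;
    }
  }
}

It is wired up the same way as in the test: construct it with the shared ZKWatcher and register it via zkw.registerListener(...).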
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
In class VerifyReplication, method getPeerQuorumConfig:
private static Pair<ReplicationPeerConfig, Configuration> getPeerQuorumConfig(final Configuration conf, String peerId) throws IOException {
  ZKWatcher localZKW = null;
  try {
    localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
      }
      @Override
      public boolean isAborted() {
        return false;
      }
    });
    ReplicationPeerStorage storage =
      ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
    ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId);
    return Pair.newPair(peerConfig, ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf));
  } catch (ReplicationException e) {
    throw new IOException("An error occurred while trying to connect to the remote peer cluster",
      e);
  } finally {
    if (localZKW != null) {
      localZKW.close();
    }
  }
}
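A caller would typically split the returned pair into the peer's ReplicationPeerConfig and a Configuration pointing at the peer cluster. A hedged usage sketch (the peer id "1" is just an example value):

Pair<ReplicationPeerConfig, Configuration> peer = getPeerQuorumConfig(conf, "1");
ReplicationPeerConfig peerConfig = peer.getFirst();
// This Configuration carries the peer cluster's ZooKeeper quorum and related connection settings.
Configuration peerClusterConf = peer.getSecond();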
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
In class ZNodeClearer, method clear:
/**
 * Delete the master znode if its content (a ServerName string) is the same as the one in the
 * znode file (env: HBASE_ZNODE_FILE). In case of master-RS collocation we extract the ServerName
 * string from the rsZnode path (HBASE-14861).
 * @return true on successful deletion, false otherwise.
 */
public static boolean clear(Configuration conf) {
  Configuration tempConf = new Configuration(conf);
  tempConf.setInt("zookeeper.recovery.retry", 0);
  ZKWatcher zkw;
  try {
    zkw = new ZKWatcher(tempConf, "clean znode for master", new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
      }
      @Override
      public boolean isAborted() {
        return false;
      }
    });
  } catch (IOException e) {
    LOG.warn("Can't connect to zookeeper to read the master znode", e);
    return false;
  }
  String znodeFileContent;
  try {
    znodeFileContent = ZNodeClearer.readMyEphemeralNodeOnDisk();
    return MasterAddressTracker.deleteIfEquals(zkw, znodeFileContent);
  } catch (FileNotFoundException fnfe) {
    // If no file, just keep going -- return success.
    LOG.warn("Can't find the znode file; presume non-fatal", fnfe);
    return true;
  } catch (IOException e) {
    LOG.warn("Can't read the content of the znode file", e);
    return false;
  } finally {
    zkw.close();
  }
}
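A minimal direct invocation of clear() can be sketched as follows:

Configuration conf = HBaseConfiguration.create();
// Returns true if the master znode was deleted (or the znode file was absent), false on errors.
boolean cleared = ZNodeClearer.clear(conf);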