Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
The class TestCompactionInDeadRegionServer, method test.
@Test
public void test() throws Exception {
  HRegionServer regionSvr = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
  HRegion region = regionSvr.getRegions(TABLE_NAME).get(0);
  String regName = region.getRegionInfo().getEncodedName();
  List<HRegion> metaRegs = regionSvr.getRegions(TableName.META_TABLE_NAME);
  if (metaRegs != null && !metaRegs.isEmpty()) {
    LOG.info("meta is on the same server: " + regionSvr);
    // When the region is on the same server as hbase:meta, reassigning meta would abort the
    // server since its WAL is broken, so move the region to a different server first.
    HRegionServer otherRs = UTIL.getOtherRegionServer(regionSvr);
    UTIL.moveRegionAndWait(region.getRegionInfo(), otherRs.getServerName());
    LOG.info("Moved region: " + regName + " to " + otherRs.getServerName());
  }
  HRegionServer rsToSuspend = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
  region = rsToSuspend.getRegions(TABLE_NAME).get(0);
  // Delete the region server's znode so the master considers it dead.
  ZKWatcher watcher = UTIL.getZooKeeperWatcher();
  watcher.getRecoverableZooKeeper().delete(
    ZNodePaths.joinZNode(watcher.getZNodePaths().rsZNode, rsToSuspend.getServerName().toString()),
    -1);
  LOG.info("suspending " + rsToSuspend);
  UTIL.waitFor(60000, 1000, new ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      for (RegionServerThread thread : UTIL.getHBaseCluster().getRegionServerThreads()) {
        HRegionServer rs = thread.getRegionServer();
        if (rs != rsToSuspend) {
          return !rs.getRegions(TABLE_NAME).isEmpty();
        }
      }
      return false;
    }

    @Override
    public String explainFailure() throws Exception {
      return "The region for " + TABLE_NAME + " is still on " + rsToSuspend.getServerName();
    }
  });
  try {
    region.compact(true);
    fail("Should fail as our wal file has already been closed, and walDir has also been renamed");
  } catch (Exception e) {
    LOG.debug("expected exception: ", e);
  }
  Table table = UTIL.getConnection().getTable(TABLE_NAME);
  // Reads should not hit a FileNotFoundException.
  for (int i = 0; i < 20; i++) {
    assertEquals(i, Bytes.toInt(table.get(new Get(Bytes.toBytes(i))).getValue(CF, CQ)));
  }
}
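The step that simulates the dead server above is the znode delete: the test removes the server's ephemeral entry under the rs znode through the watcher's RecoverableZooKeeper, so the master treats that server as dead even though its process keeps running. The following is a minimal sketch of just that step, not code from the HBase repository; the class and method names (DeadServerSimulator, expireRegionServer) are made up for illustration, and only the ZKWatcher, ZNodePaths and RecoverableZooKeeper calls already shown in the test are used.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

public final class DeadServerSimulator {

  private DeadServerSimulator() {
  }

  // Deletes the znode backing a live region server; version -1 matches any znode version.
  static void expireRegionServer(ZKWatcher watcher, ServerName serverName) throws Exception {
    String rsPath =
      ZNodePaths.joinZNode(watcher.getZNodePaths().rsZNode, serverName.toString());
    watcher.getRecoverableZooKeeper().delete(rsPath, -1);
  }
}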
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
The class TestMasterNoCluster, method tearDown.
@After
public void tearDown() throws KeeperException, ZooKeeperConnectionException, IOException {
  // Make sure zk is clean before we run the next test.
  ZKWatcher zkw = new ZKWatcher(TESTUTIL.getConfiguration(), "@Before", new Abortable() {

    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException(why, e);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  });
  // The recursive delete fails sometimes (NotEmptyException), so retry until it succeeds
  // or the wait times out.
  try {
    TESTUTIL.waitFor(10000, (Waiter.Predicate<Exception>) () -> {
      try {
        ZKUtil.deleteNodeRecursively(zkw, zkw.getZNodePaths().baseZNode);
        return true;
      } catch (KeeperException.NotEmptyException e) {
        LOG.info("Failed delete, retrying", e);
      }
      return false;
    });
  } catch (Exception e) {
    LOG.info("Failed zk clear", e);
  }
  zkw.close();
}
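This tearDown shows the common construction pattern: a ZKWatcher is built from a Configuration, an identifier string and an Abortable, and everything under the base znode is wiped with ZKUtil.deleteNodeRecursively before the watcher is closed. Below is a condensed sketch of the same cleanup, not HBase source; the helper name (ZkTestCleanup.wipeBaseZNode) is hypothetical, and it relies on ZKWatcher being usable in try-with-resources, as the next example demonstrates.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public final class ZkTestCleanup {

  // Hypothetical helper: delete everything under the base znode, then close the watcher.
  static void wipeBaseZNode(Configuration conf) throws Exception {
    Abortable failFast = new Abortable() {

      @Override
      public void abort(String why, Throwable e) {
        throw new RuntimeException(why, e);
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    };
    try (ZKWatcher zkw = new ZKWatcher(conf, "wipeBaseZNode", failFast)) {
      ZKUtil.deleteNodeRecursively(zkw, zkw.getZNodePaths().baseZNode);
    }
  }
}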
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
The class TestActiveMasterManager, method testRestartMaster.
@Test
public void testRestartMaster() throws IOException, KeeperException {
  try (ZKWatcher zk =
    new ZKWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true)) {
    try {
      ZKUtil.deleteNode(zk, zk.getZNodePaths().masterAddressZNode);
      ZKUtil.deleteNode(zk, zk.getZNodePaths().clusterStateZNode);
    } catch (KeeperException.NoNodeException nne) {
      // The znodes may not exist yet; nothing to clean up.
    }
    // Create the master node with a dummy address.
    ServerName master = ServerName.valueOf("localhost", 1, EnvironmentEdgeManager.currentTime());
    // Should not have a master yet.
    DummyMaster dummyMaster = new DummyMaster(zk, master);
    ClusterStatusTracker clusterStatusTracker = dummyMaster.getClusterStatusTracker();
    ActiveMasterManager activeMasterManager = dummyMaster.getActiveMasterManager();
    assertFalse(activeMasterManager.clusterHasActiveMaster.get());
    assertFalse(activeMasterManager.getActiveMasterServerName().isPresent());
    // First test becoming the active master uninterrupted.
    MonitoredTask status = Mockito.mock(MonitoredTask.class);
    clusterStatusTracker.setClusterUp();
    activeMasterManager.blockUntilBecomingActiveMaster(100, status);
    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
    assertMaster(zk, master);
    assertMaster(zk, activeMasterManager.getActiveMasterServerName().get());
    // Now pretend the master restarted.
    DummyMaster secondDummyMaster = new DummyMaster(zk, master);
    ActiveMasterManager secondActiveMasterManager = secondDummyMaster.getActiveMasterManager();
    assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
    activeMasterManager.blockUntilBecomingActiveMaster(100, status);
    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
    assertMaster(zk, master);
    assertMaster(zk, activeMasterManager.getActiveMasterServerName().get());
    assertMaster(zk, secondActiveMasterManager.getActiveMasterServerName().get());
  }
}
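Before exercising ActiveMasterManager, the test resets the master-related znodes: it opens a ZKWatcher in try-with-resources with the fourth constructor argument set to true (allowing the base znode to be created), deletes the master address and cluster state znodes, and ignores NoNodeException when they do not exist yet. A minimal sketch of that reset in isolation, with hypothetical class and method names (MasterZNodeReset, clearMasterZNodes):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;

public final class MasterZNodeReset {

  static void clearMasterZNodes(Configuration conf) throws Exception {
    // The trailing 'true' lets the watcher create the base znode if it is missing.
    try (ZKWatcher zk = new ZKWatcher(conf, "clearMasterZNodes", null, true)) {
      try {
        ZKUtil.deleteNode(zk, zk.getZNodePaths().masterAddressZNode);
        ZKUtil.deleteNode(zk, zk.getZNodePaths().clusterStateZNode);
      } catch (KeeperException.NoNodeException alreadyGone) {
        // Nothing to delete on a fresh ZooKeeper; safe to ignore.
      }
    }
  }
}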
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
The class TestRegionServerHostname, method testRegionServerHostnameReportedToMaster.
@Test
public void testRegionServerHostnameReportedToMaster() throws Exception {
  TEST_UTIL.getConfiguration()
    .setBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true);
  StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS)
    .numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
  TEST_UTIL.startMiniCluster(option);
  int expectedRS = NUM_RS;
  try (ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher()) {
    List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode);
    assertEquals(expectedRS, servers.size());
  }
}
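The check above reads cluster membership straight from ZooKeeper: the children of the rs znode are the ServerName strings of the live region servers. A small sketch of that read as a standalone helper (LiveServersFromZk is a made-up name, not an HBase class):

import java.util.List;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public final class LiveServersFromZk {

  // Each child name under the rs znode is the ServerName string of one live region server.
  static List<String> liveRegionServers(ZKWatcher zkw) throws Exception {
    return ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode);
  }
}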
Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.
The class TestRegionServerHostname, method testRegionServerHostname.
@Test
public void testRegionServerHostname() throws Exception {
  Enumeration<NetworkInterface> netInterfaceList = NetworkInterface.getNetworkInterfaces();
  while (netInterfaceList.hasMoreElements()) {
    NetworkInterface ni = netInterfaceList.nextElement();
    Enumeration<InetAddress> addrList = ni.getInetAddresses();
    // Iterate through the host addresses and use each as the hostname.
    while (addrList.hasMoreElements()) {
      InetAddress addr = addrList.nextElement();
      if (
        addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isMulticastAddress()
          || !addr.isSiteLocalAddress()
      ) {
        continue;
      }
      String hostName = addr.getHostName();
      LOG.info("Found " + hostName + " on " + ni + ", addr=" + addr);
      TEST_UTIL.getConfiguration().set(DNS.MASTER_HOSTNAME_KEY, hostName);
      TEST_UTIL.getConfiguration().set(DNS.UNSAFE_RS_HOSTNAME_KEY, hostName);
      StartTestingClusterOption option = StartTestingClusterOption.builder()
        .numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
      TEST_UTIL.startMiniCluster(option);
      try {
        ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
        List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode);
        assertEquals(NUM_RS, servers.size());
        for (String server : servers) {
          assertTrue("From zookeeper: " + server + " hostname: " + hostName,
            server.startsWith(hostName.toLowerCase(Locale.ROOT) + ","));
        }
        zkw.close();
      } finally {
        TEST_UTIL.shutdownMiniCluster();
      }
    }
  }
}
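The assertion inside the loop relies on the layout of those znode names: each child of the rs znode is a ServerName rendered as "hostname,port,startcode", so verifying the configured hostname amounts to a lower-cased prefix comparison. A sketch of that check using the same ZKUtil call as the test; the helper (HostnameCheck.allServersUseHostname) is hypothetical:

import java.util.Locale;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public final class HostnameCheck {

  // Returns true only if every registered region server name starts with "<hostname>,".
  static boolean allServersUseHostname(ZKWatcher zkw, String hostName) throws Exception {
    String expectedPrefix = hostName.toLowerCase(Locale.ROOT) + ",";
    for (String server : ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode)) {
      if (!server.startsWith(expectedPrefix)) {
        return false;
      }
    }
    return true;
  }
}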