Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class TestHBaseFsckTwoRS, method testSidelineOverlapRegion:
/**
 * Creates and fixes a bad table containing an overlap group of three
 * regions. Sets HBaseFsck.maxMerge to 2 to trigger sidelining of an
 * overlapped region. Messes with the meta data so that
 * closeRegion/offlineRegion throw exceptions.
 */
@Test(timeout = 180000)
public void testSidelineOverlapRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    setupTable(tableName);
    assertEquals(ROWKEYS.length, countRows());

    // Mess it up by creating an overlap
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    HRegionInfo hriOverlap1 =
      createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("AB"));
    TEST_UTIL.assignRegion(hriOverlap1);
    HRegionInfo hriOverlap2 =
      createRegion(tbl.getTableDescriptor(), Bytes.toBytes("AB"), Bytes.toBytes("B"));
    TEST_UTIL.assignRegion(hriOverlap2);

    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
      HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
      HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
      HBaseFsck.ErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN });
    assertEquals(3, hbck.getOverlapGroups(tableName).size());
    assertEquals(ROWKEYS.length, countRows());

    // Mess with the overlapped regions to trigger NotServingRegionException
    Multimap<byte[], HBaseFsck.HbckInfo> overlapGroups = hbck.getOverlapGroups(tableName);
    ServerName serverName = null;
    byte[] regionName = null;
    for (HBaseFsck.HbckInfo hbi : overlapGroups.values()) {
      if ("A".equals(Bytes.toString(hbi.getStartKey()))
          && "B".equals(Bytes.toString(hbi.getEndKey()))) {
        regionName = hbi.getRegionName();
        // Pick an RS not serving the region, to force bad assignment info into META.
        int k = cluster.getServerWith(regionName);
        for (int i = 0; i < 3; i++) {
          if (i != k) {
            HRegionServer rs = cluster.getRegionServer(i);
            serverName = rs.getServerName();
            break;
          }
        }
        HBaseFsckRepair.closeRegionSilentlyAndWait(connection,
          cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
        admin.offline(regionName);
        break;
      }
    }

    assertNotNull(regionName);
    assertNotNull(serverName);
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) {
      Put put = new Put(regionName);
      put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
        Bytes.toBytes(serverName.getHostAndPort()));
      meta.put(put);
    }

    // Fix the problem.
    HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService);
    fsck.connect();
    // i.e. -details
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setFixAssignments(true);
    fsck.setFixMeta(true);
    fsck.setFixHdfsHoles(true);
    fsck.setFixHdfsOverlaps(true);
    fsck.setFixHdfsOrphans(true);
    fsck.setFixVersionFile(true);
    fsck.setSidelineBigOverlaps(true);
    fsck.setMaxMerge(2);
    fsck.onlineHbck();
    fsck.close();

    // Verify that the overlaps are fixed and that there are fewer rows,
    // since one region was sidelined.
    HBaseFsck hbck2 = doFsck(conf, false);
    assertNoErrors(hbck2);
    assertEquals(0, hbck2.getOverlapGroups(tableName).size());
    assertTrue(ROWKEYS.length > countRows());
  } finally {
    cleanupTable(tableName);
  }
}
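Before repairing, the overlap groups that hbck found can be inspected for debugging. A minimal sketch (not part of the test) that dumps each group, using only the accessors already seen above:

// Sketch: print each overlap group hbck found for the table, keyed by
// the group's merge key. All accessors appear in the test above.
Multimap<byte[], HBaseFsck.HbckInfo> groups = hbck.getOverlapGroups(tableName);
for (byte[] mergeKey : groups.keySet()) {
  System.out.println("Overlap group at key " + Bytes.toStringBinary(mergeKey) + ":");
  for (HBaseFsck.HbckInfo info : groups.get(mergeKey)) {
    System.out.println("  region " + Bytes.toStringBinary(info.getRegionName())
      + " [" + Bytes.toStringBinary(info.getStartKey())
      + ", " + Bytes.toStringBinary(info.getEndKey()) + ")");
  }
}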
Use of org.apache.hadoop.hbase.master.HMaster in project phoenix by apache.
From the class WALRecoveryRegionPostOpenIT, method moveRegionAndWait:
private void moveRegionAndWait(MiniHBaseCluster miniHBaseCluster, HRegion destRegion,
    HRegionServer destRegionServer) throws IOException, InterruptedException {
  HMaster master = miniHBaseCluster.getMaster();
  // Ask the master to move the region to the destination region server.
  getUtility().getHBaseAdmin().move(destRegion.getRegionInfo().getEncodedNameAsBytes(),
    Bytes.toBytes(destRegionServer.getServerName().getServerName()));
  // Poll the master's assignment manager until the region is reported
  // on the destination server.
  while (true) {
    ServerName currentRegionServerName = master.getAssignmentManager().getRegionStates()
      .getRegionServerOfRegion(destRegion.getRegionInfo());
    if (currentRegionServerName != null
        && currentRegionServerName.equals(destRegionServer.getServerName())) {
      getUtility().assertRegionOnServer(destRegion.getRegionInfo(), currentRegionServerName, 200);
      break;
    }
    Thread.sleep(10);
  }
}
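A hypothetical caller (not in the Phoenix test; the table name is an assumption) might use this helper to bounce a region off the server currently hosting it in the mini cluster:

// Hypothetical usage sketch: move the first region of 'tableName' onto a
// region server other than its current host. 'tableName' is assumed to be
// the table under test.
MiniHBaseCluster cluster = getUtility().getMiniHBaseCluster();
HRegion region = cluster.getRegions(tableName).get(0);
int current = cluster.getServerWith(region.getRegionInfo().getRegionName());
HRegionServer destination = cluster.getRegionServer(current == 0 ? 1 : 0);
moveRegionAndWait(cluster, region, destination);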
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class HBaseServerBase, method putUpWebUI:
/**
 * Puts up the web UI.
 */
private void putUpWebUI() throws IOException {
  int port =
    this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT);
  String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0");
  if (this instanceof HMaster) {
    port = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT);
    addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
  }
  // A negative port (e.g. -1) disables the info server.
  if (port < 0) {
    return;
  }
  if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
    String msg = "Failed to start http info server. Address " + addr
      + " does not belong to this host. Correct configuration parameter: "
      + "hbase.regionserver.info.bindAddress";
    LOG.error(msg);
    throw new IOException(msg);
  }
  // Check whether automatic port binding is enabled.
  boolean auto = this.conf.getBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, false);
  while (true) {
    try {
      this.infoServer = new InfoServer(getProcessName(), addr, port, false, this.conf);
      infoServer.addPrivilegedServlet("dump", "/dump", getDumpServlet());
      configureInfoServer(infoServer);
      this.infoServer.start();
      break;
    } catch (BindException e) {
      if (!auto) {
        // Auto bind is disabled; rethrow the BindException.
        LOG.error("Failed binding http info server to port: " + port);
        throw e;
      }
      // Auto bind is enabled; try the next port.
      LOG.info("Failed binding http info server to port: " + port);
      port++;
      LOG.info("Retry starting http info server with port: " + port);
    }
  }
  // Record the port actually bound so it can be discovered later from the
  // Configuration rather than from the original setting.
  port = this.infoServer.getPort();
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port);
  int masterInfoPort = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT);
  conf.setInt("hbase.master.info.port.orig", masterInfoPort);
  conf.setInt(HConstants.MASTER_INFO_PORT, port);
}
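The auto-bind branch matters mostly in tests, where several servers share one host and the default ports may be taken. A minimal sketch (configuration keys taken from the method above; imports for Configuration/HBaseConfiguration assumed) that opts into it before starting a cluster:

// Sketch: let info servers retry on successive ports instead of failing
// with a BindException when the default port is already in use.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, true);
conf.setInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT);
conf.setInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT);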
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class TestZooKeeper, method testMasterZKSessionRecoveryFailure:
/**
 * Master recovery when the znode already exists. Internally, this test
 * differs from {@link #testMasterSessionExpired} because here the master
 * znode will already exist in ZK.
 */
@Test
public void testMasterZKSessionRecoveryFailure() throws Exception {
  LOG.info("Starting " + name.getMethodName());
  SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HMaster m = cluster.getMaster();
  m.abort("Test recovery from zk session expired", new KeeperException.SessionExpiredException());
  // The master doesn't recover any more; it stays stopped.
  assertTrue(m.isStopped());
  testSanity(name.getMethodName());
}
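If the cluster had been started with a backup master, a hypothetical follow-up (not in this test) could verify failover after the abort:

// Hypothetical follow-up, assuming a backup master was started with the
// cluster: wait for it to become active and confirm it is serving.
assertTrue(cluster.waitForActiveAndReadyMaster(30000));
HMaster newMaster = cluster.getMaster();
assertFalse(newMaster.isStopped());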
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class TestInfoServers, method testMasterServerReadOnly:
@Test
public void testMasterServerReadOnly() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[] cf = Bytes.toBytes("d");
  UTIL.createTable(tableName, cf);
  UTIL.waitTableAvailable(tableName);
  HMaster master = UTIL.getHBaseCluster().getMaster();
  int port = master.getRegionServerInfoPort(master.getServerName());
  assertDoesNotContainContent(
    new URL("http://localhost:" + port + "/table.jsp?name=" + tableName + "&action=split&key="),
    "Table action request accepted");
  assertDoesNotContainContent(
    new URL("http://localhost:" + port + "/table.jsp?name=" + tableName), "Actions:");
}
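The assertDoesNotContainContent helper is not shown on this page. A plausible sketch (an assumption, not the actual TestInfoServers code) that matches how it is used above:

// Assumed implementation sketch: fetch the URL and fail if the response
// body contains the given text (requires Java 9+ for readAllBytes).
private void assertDoesNotContainContent(URL url, String needle) throws IOException {
  try (InputStream in = url.openStream()) {
    String body = new String(in.readAllBytes(), StandardCharsets.UTF_8);
    assertFalse("Unexpected '" + needle + "' in " + url, body.contains(needle));
  }
}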