Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
The class MasterStatusServlet, method doGet.
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
  HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER);
  assert master != null : "No Master in context!";
  response.setContentType("text/html");
  Configuration conf = master.getConfiguration();
  Map<String, Integer> frags = getFragmentationInfo(master, conf);
  ServerName metaLocation = null;
  List<ServerName> servers = null;
  Set<ServerName> deadServers = null;
  if (master.isActiveMaster()) {
    metaLocation = getMetaLocationOrNull(master);
    ServerManager serverManager = master.getServerManager();
    if (serverManager != null) {
      deadServers = serverManager.getDeadServers().copyServerNames();
      servers = serverManager.getOnlineServersList();
    }
  }
  MasterStatusTmpl tmpl = new MasterStatusTmpl()
    .setFrags(frags)
    .setMetaLocation(metaLocation)
    .setServers(servers)
    .setDeadServers(deadServers)
    .setCatalogJanitorEnabled(master.isCatalogJanitorEnabled());
  if (request.getParameter("filter") != null) {
    tmpl.setFilter(request.getParameter("filter"));
  }
  if (request.getParameter("format") != null) {
    tmpl.setFormat(request.getParameter("format"));
  }
  tmpl.render(response.getWriter(), master);
}
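A minimal sketch of how the HMaster instance might be placed into the servlet context so that doGet can look it up via HMaster.MASTER. The Jetty classes, the "/master-status" path, and the class name MasterStatusServletSetupSketch are illustrative assumptions, not taken from the HBase codebase (HBase's own InfoServer does the equivalent wiring internally).

// Hedged sketch: wiring an HMaster into a servlet context so MasterStatusServlet can find it.
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterStatusServlet;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;

public class MasterStatusServletSetupSketch {
  static ServletContextHandler bind(HMaster master) {
    ServletContextHandler handler = new ServletContextHandler();
    // doGet reads this attribute back with getServletContext().getAttribute(HMaster.MASTER)
    handler.setAttribute(HMaster.MASTER, master);
    handler.addServlet(new ServletHolder(new MasterStatusServlet()), "/master-status");
    return handler;
  }
}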
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
The class ReplicationLogCleaner, method init.
@Override
public void init(Map<String, Object> params) {
  super.init(params);
  try {
    if (MapUtils.isNotEmpty(params)) {
      Object master = params.get(HMaster.MASTER);
      if (master != null && master instanceof HMaster) {
        zkw = ((HMaster) master).getZooKeeper();
        shareZK = true;
      }
    }
    if (zkw == null) {
      zkw = new ZKWatcher(getConf(), "replicationLogCleaner", null);
    }
    this.queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf());
  } catch (IOException e) {
    LOG.error("Error while configuring " + this.getClass().getName(), e);
  }
}
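A brief usage sketch showing how the presence of the HMaster.MASTER entry in the params map decides whether the cleaner shares the master's ZKWatcher or opens its own. The params, conf, and master variables are assumed to exist in the calling context; this is not code from the HBase repository.

// Hedged sketch of driving ReplicationLogCleaner.init under the two configurations.
Map<String, Object> params = new HashMap<>();
params.put(HMaster.MASTER, master);   // with this entry: zkw = master.getZooKeeper(), shareZK = true
ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
cleaner.setConf(conf);                // getConf() inside init() relies on the configuration being set first
cleaner.init(params);                 // without the MASTER entry, init() creates its own ZKWatcher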
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
The class JVMClusterUtil, method createMasterThread.
/**
 * Creates a {@link MasterThread}.
 * Call 'start' on the returned thread to make it run.
 * @param c Configuration to use.
 * @param hmc Class to create.
 * @param index Used to distinguish the object returned.
 * @throws IOException
 * @return Master added.
 */
public static JVMClusterUtil.MasterThread createMasterThread(final Configuration c,
    final Class<? extends HMaster> hmc, final int index) throws IOException {
  HMaster server;
  try {
    server = hmc.getConstructor(Configuration.class).newInstance(c);
  } catch (InvocationTargetException ite) {
    Throwable target = ite.getTargetException();
    throw new RuntimeException("Failed construction of Master: " + hmc.toString() +
      ((target.getCause() != null) ? target.getCause().getMessage() : ""), target);
  } catch (Exception e) {
    throw new IOException(e);
  }
  // Needed if a master based registry is configured for internal cluster connections. Here, we
  // just add the current master host port since we do not know other master addresses up front
  // in mini cluster tests.
  c.set(HConstants.MASTER_ADDRS_KEY,
    Preconditions.checkNotNull(server.getServerName().getAddress()).toString());
  return new JVMClusterUtil.MasterThread(server, index);
}
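A short usage sketch of the helper above (assumed context; this mirrors how mini-cluster code would drive it, and the configuration values are whatever the test environment supplies):

// Hedged usage sketch: construct and start a master thread for a local mini cluster.
Configuration conf = HBaseConfiguration.create();
JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(conf, HMaster.class, 0);
mt.start();        // as the javadoc notes, 'start' must be called explicitly
mt.getMaster();    // the running HMaster instance wrapped by the thread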
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
The class TestAdmin1, method testMergeRegions.
@Test
public void testMergeRegions() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build();
  byte[][] splitRows = new byte[2][];
  splitRows[0] = new byte[] { (byte) '3' };
  splitRows[1] = new byte[] { (byte) '6' };
  try {
    TEST_UTIL.createTable(td, splitRows);
    TEST_UTIL.waitTableAvailable(tableName);
    List<RegionInfo> tableRegions;
    RegionInfo regionA;
    RegionInfo regionB;
    RegionInfo regionC;
    RegionInfo mergedChildRegion = null;
    // merge with full name
    tableRegions = ADMIN.getRegions(tableName);
    assertEquals(3, ADMIN.getRegions(tableName).size());
    regionA = tableRegions.get(0);
    regionB = tableRegions.get(1);
    regionC = tableRegions.get(2);
    // TODO convert this to version that is synchronous (See HBASE-16668)
    ADMIN.mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), false)
      .get(60, TimeUnit.SECONDS);
    tableRegions = ADMIN.getRegions(tableName);
    assertEquals(2, tableRegions.size());
    for (RegionInfo ri : tableRegions) {
      if (regionC.compareTo(ri) != 0) {
        mergedChildRegion = ri;
        break;
      }
    }
    assertNotNull(mergedChildRegion);
    // Need to wait until the GC of the merged child region is done.
    HMaster services = TEST_UTIL.getHBaseCluster().getMaster();
    CatalogJanitor cj = services.getCatalogJanitor();
    assertTrue(cj.scan() > 0);
    // Wait until all procedures settled down
    while (!services.getMasterProcedureExecutor().getActiveProcIds().isEmpty()) {
      Thread.sleep(200);
    }
    // TODO convert this to version that is synchronous (See HBASE-16668)
    ADMIN.mergeRegionsAsync(regionC.getEncodedNameAsBytes(), mergedChildRegion.getEncodedNameAsBytes(), false)
      .get(60, TimeUnit.SECONDS);
    assertEquals(1, ADMIN.getRegions(tableName).size());
  } finally {
    ADMIN.disableTable(tableName);
    ADMIN.deleteTable(tableName);
  }
}
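The polling loop in the test above could be factored into a small helper; here is a hedged sketch, where the name waitForProceduresToSettle is hypothetical and only the calls already used in the test are relied on:

// Hedged helper sketch: block until the master's procedure executor has no active procedures.
private static void waitForProceduresToSettle(HMaster master) throws InterruptedException {
  while (!master.getMasterProcedureExecutor().getActiveProcIds().isEmpty()) {
    Thread.sleep(200);
  }
}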
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
The class TestAsyncClusterAdminApi2, method testStop.
@Test
public void testStop() throws Exception {
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  assertFalse(rs.isStopped());
  admin.stopRegionServer(rs.getServerName()).join();
  assertTrue(rs.isStopped());
  HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
  assertFalse(master.isStopped());
  admin.stopMaster().join();
  assertTrue(master.isStopped());
}
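For comparison, a hedged sketch of the same shutdown calls through the blocking Admin API (this assumes TEST_UTIL exposes a synchronous Admin and that rs is the region server from the test above; the variable names are illustrative):

// Hedged sketch: synchronous equivalents of the async admin calls used in testStop.
Admin syncAdmin = TEST_UTIL.getAdmin();
syncAdmin.stopRegionServer(rs.getServerName().getAddress().toString());  // "host:port" form
syncAdmin.stopMaster();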