use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class HMaster method listTableNames.
/**
 * Returns the list of table names that match the specified request.
 * @param namespace the namespace to query, or null if querying for all
 * @param regex the regular expression to match against, or null if querying for all
 * @param includeSysTables false to match only against userspace tables
 * @return the list of table names
 */
public List<TableName> listTableNames(final String namespace, final String regex,
    final boolean includeSysTables) throws IOException {
  List<HTableDescriptor> htds = new ArrayList<>();
  boolean bypass = cpHost != null ? cpHost.preGetTableNames(htds, regex) : false;
  if (!bypass) {
    htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
    if (cpHost != null) {
      cpHost.postGetTableNames(htds, regex);
    }
  }
  List<TableName> result = new ArrayList<>(htds.size());
  for (HTableDescriptor htd : htds) {
    result.add(htd.getTableName());
  }
  return result;
}
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class HMaster method getTableDescriptors.
/**
 * @return list of table descriptors after filtering by regex, namespace, and
 *         whether to include system tables
 * @throws IOException if the table descriptors cannot be read
 */
private List<HTableDescriptor> getTableDescriptors(final List<HTableDescriptor> htds,
    final String namespace, final String regex, final List<TableName> tableNameList,
    final boolean includeSysTables) throws IOException {
  if (tableNameList == null || tableNameList.isEmpty()) {
    // Request is for all TableDescriptors.
    Collection<HTableDescriptor> allHtds;
    if (namespace != null && namespace.length() > 0) {
      // Check the namespace existence; this fails if it does not exist.
      this.clusterSchemaService.getNamespace(namespace);
      allHtds = tableDescriptors.getByNamespace(namespace).values();
    } else {
      allHtds = tableDescriptors.getAll().values();
    }
    for (HTableDescriptor desc : allHtds) {
      if (tableStateManager.isTablePresent(desc.getTableName())
          && (includeSysTables || !desc.getTableName().isSystemTable())) {
        htds.add(desc);
      }
    }
  } else {
    for (TableName s : tableNameList) {
      if (tableStateManager.isTablePresent(s)) {
        HTableDescriptor desc = tableDescriptors.get(s);
        if (desc != null) {
          htds.add(desc);
        }
      }
    }
  }
  // Retain only those matched by the regular expression.
  if (regex != null) {
    filterTablesByRegex(htds, Pattern.compile(regex));
  }
  return htds;
}
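The filterTablesByRegex call above is a private helper that this page does not show. A simplified sketch of such a filter follows (assumption: the real HBase helper also matches default-namespace tables against their namespace-qualified form, which this version omits):
// Simplified sketch of a regex filter over table descriptors: retains only
// descriptors whose table name matches the compiled pattern.
// Requires java.util.Collection, java.util.Iterator, and java.util.regex.Pattern.
private static void filterTablesByRegex(final Collection<HTableDescriptor> descriptors,
    final Pattern pattern) {
  Iterator<HTableDescriptor> itr = descriptors.iterator();
  while (itr.hasNext()) {
    HTableDescriptor htd = itr.next();
    if (!pattern.matcher(htd.getTableName().getNameAsString()).matches()) {
      itr.remove();
    }
  }
}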
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class HMaster method finishActiveMasterInitialization.
/**
 * Finish initialization of HMaster after becoming the primary master.
 *
 * <ol>
 * <li>Initialize master components - file system manager, server manager,
 * assignment manager, region server tracker, etc.</li>
 * <li>Start necessary service threads - balancer, catalog janitor,
 * executor services, etc.</li>
 * <li>Set cluster as UP in ZooKeeper</li>
 * <li>Wait for RegionServers to check in</li>
 * <li>Split logs and perform data recovery, if necessary</li>
 * <li>Ensure assignment of meta/namespace regions</li>
 * <li>Handle either fresh cluster start or master failover</li>
 * </ol>
 */
private void finishActiveMasterInitialization(MonitoredTask status)
    throws IOException, InterruptedException, KeeperException, CoordinatedStateException {
  activeMaster = true;
  Thread zombieDetector = new Thread(new InitializationMonitor(this),
      "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());
  zombieDetector.start();
  /*
   * We are active master now... go initialize components we need to run.
   * Note, there may be dross in zk from previous runs; it'll get addressed
   * below after we determine if cluster startup or failover.
   */
  status.setStatus("Initializing Master file system");
  this.masterActiveTime = System.currentTimeMillis();
  // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
  this.fileSystemManager = new MasterFileSystem(this);
  this.walManager = new MasterWalManager(this);
  // enable table descriptors cache
  this.tableDescriptors.setCacheOn();
  // set the META's descriptor to the correct replication
  this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
      conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
  // warm-up HTDs cache on master initialization
  if (preLoadTableDescriptors) {
    status.setStatus("Pre-loading table descriptors");
    this.tableDescriptors.getAll();
  }
  // publish cluster ID
  status.setStatus("Publishing Cluster ID in ZooKeeper");
  ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
  this.initLatch.countDown();
  this.serverManager = createServerManager(this);
  this.tableStateManager = new TableStateManager(this);
  status.setStatus("Initializing ZK system trackers");
  initializeZKBasedSystemTrackers();
  // This is for backwards compatibility
  // See HBASE-11393
  status.setStatus("Update TableCFs node in ZNode");
  TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper, conf, this.clusterConnection);
  tableCFsUpdater.update();
  // initialize master side coprocessors before we start handling requests
  status.setStatus("Initializing master coprocessors");
  this.cpHost = new MasterCoprocessorHost(this, this.conf);
  // start up all service threads.
  status.setStatus("Initializing master service threads");
  startServiceThreads();
  // Wake up this server to check in
  sleeper.skipSleepCycle();
  // Wait for region servers to report in
  status.setStatus("Wait for region servers to report in");
  waitForRegionServers(status);
  // get a list for previously failed RS which need log splitting work
  // we recover hbase:meta region servers inside master initialization and
  // handle other failed servers in SSH in order to start up master node ASAP
  MasterMetaBootstrap metaBootstrap = createMetaBootstrap(this, status);
  metaBootstrap.splitMetaLogsBeforeAssignment();
  this.initializationBeforeMetaAssignment = true;
  if (this.balancer instanceof FavoredNodesPromoter) {
    favoredNodesManager = new FavoredNodesManager(this);
  }
  // Wait for regionserver to finish initialization.
  if (BaseLoadBalancer.tablesOnMaster(conf)) {
    waitForServerOnline();
  }
  // initialize load balancer
  this.balancer.setMasterServices(this);
  this.balancer.setClusterStatus(getClusterStatus());
  this.balancer.initialize();
  // Check if the master is shutting down because of some issue
  // in initializing the regionserver or the balancer.
  if (isStopped()) {
    return;
  }
  // Make sure meta is assigned before proceeding.
  status.setStatus("Assigning Meta Region");
  metaBootstrap.assignMeta();
  // Check again: assignMeta above can return even though hbase:meta has not been
  // assigned when master is shutting down.
  if (isStopped()) {
    return;
  }
  // Initialize after meta, as it scans meta.
  if (favoredNodesManager != null) {
    SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
        new SnapshotOfRegionAssignmentFromMeta(getConnection());
    snapshotOfRegionAssignment.initialize();
    favoredNodesManager.initialize(snapshotOfRegionAssignment);
  }
  // Migrate any existing table states from zk so that log splitters
  // and the recovery process treat states properly.
  for (Map.Entry<TableName, TableState.State> entry :
      ZKDataMigrator.queryForTableStates(getZooKeeper()).entrySet()) {
    LOG.info("Converting state from zk to new states:" + entry);
    tableStateManager.setTableState(entry.getKey(), entry.getValue());
  }
  ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().znodePaths.tableZNode);
  status.setStatus("Submitting log splitting work for previously failed region servers");
  metaBootstrap.processDeadServers();
  // Fix up assignment manager status
  status.setStatus("Starting assignment manager");
  this.assignmentManager.joinCluster();
  // set cluster status again after user regions are assigned
  this.balancer.setClusterStatus(getClusterStatus());
  // Start balancer and meta catalog janitor after meta and regions have been assigned.
  status.setStatus("Starting balancer and catalog janitor");
  this.clusterStatusChore = new ClusterStatusChore(this, balancer);
  getChoreService().scheduleChore(clusterStatusChore);
  this.balancerChore = new BalancerChore(this);
  getChoreService().scheduleChore(balancerChore);
  this.normalizerChore = new RegionNormalizerChore(this);
  getChoreService().scheduleChore(normalizerChore);
  this.catalogJanitorChore = new CatalogJanitor(this);
  getChoreService().scheduleChore(catalogJanitorChore);
  // Do metrics periodically
  periodicDoMetricsChore = new PeriodicDoMetrics(msgInterval, this);
  getChoreService().scheduleChore(periodicDoMetricsChore);
  status.setStatus("Starting cluster schema service");
  initClusterSchemaService();
  if (this.cpHost != null) {
    try {
      this.cpHost.preMasterInitialization();
    } catch (IOException e) {
      LOG.error("Coprocessor preMasterInitialization() hook failed", e);
    }
  }
  status.markComplete("Initialization successful");
  LOG.info("Master has completed initialization");
  configurationManager.registerObserver(this.balancer);
  // Set master as 'initialized'.
  setInitialized(true);
  status.setStatus("Assign meta replicas");
  metaBootstrap.assignMetaReplicas();
  status.setStatus("Starting quota manager");
  initQuotaManager();
  // Clear the dead servers with the same host name and port as an online server, because
  // we do not remove a dead server with the same hostname and port as an RS that is
  // trying to check in before master initialization. See HBASE-5916.
  this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
  // Check and set the znode ACLs if needed, in case we are overtaking a non-secure configuration
  status.setStatus("Checking ZNode ACLs");
  zooKeeper.checkAndSetZNodeAcls();
  status.setStatus("Initializing MOB Cleaner");
  initMobCleaner();
  status.setStatus("Calling postStartMaster coprocessors");
  if (this.cpHost != null) {
    // don't let cp initialization errors kill the master
    try {
      this.cpHost.postStartMaster();
    } catch (IOException ioe) {
      LOG.error("Coprocessor postStartMaster() hook failed", ioe);
    }
  }
  zombieDetector.interrupt();
}
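The InitializationMonitor started at the top of this method acts as a zombie detector: a watchdog thread that keeps a master stuck in initialization from lingering forever. A minimal sketch of that pattern (illustrative only; the class name, timeout, and abort behavior here are stand-ins, not the actual HBase implementation):
// Illustrative watchdog sketch: a thread that aborts the process if
// initialization has not completed within a deadline. The master interrupts
// it once initialization succeeds (see zombieDetector.interrupt() above).
// `isInitialized` and TIMEOUT_MS are hypothetical stand-ins.
final class InitWatchdog implements Runnable {
  private static final long TIMEOUT_MS = 15 * 60 * 1000L;
  private final java.util.function.BooleanSupplier isInitialized;

  InitWatchdog(java.util.function.BooleanSupplier isInitialized) {
    this.isInitialized = isInitialized;
  }

  @Override
  public void run() {
    try {
      Thread.sleep(TIMEOUT_MS);
      if (!isInitialized.getAsBoolean()) {
        // Initialization is stuck; abort rather than linger as a half-initialized master.
        System.exit(1);
      }
    } catch (InterruptedException e) {
      // Expected path: initialization finished and the watchdog was interrupted.
      Thread.currentThread().interrupt();
    }
  }
}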
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class RegionPlacementMaintainer method getNewAssignmentPlan.
public FavoredNodesPlan getNewAssignmentPlan() throws IOException {
  // Get the current region assignment snapshot by scanning from the META
  SnapshotOfRegionAssignmentFromMeta assignmentSnapshot = this.getRegionAssignmentSnapshot();
  // Get the region locality map
  Map<String, Map<String, Float>> regionLocalityMap = null;
  if (this.enforceLocality) {
    regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
  }
  // Initialize the assignment plan
  FavoredNodesPlan plan = new FavoredNodesPlan();
  // Get the table to region mapping
  Map<TableName, List<HRegionInfo>> tableToRegionMap = assignmentSnapshot.getTableToRegionMap();
  LOG.info("Start to generate the new assignment plan for the "
      + tableToRegionMap.keySet().size() + " tables");
  for (TableName table : tableToRegionMap.keySet()) {
    try {
      if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) {
        continue;
      }
      // TODO: maybe run the placement in parallel for each table
      genAssignmentPlan(table, assignmentSnapshot, regionLocalityMap, plan,
          USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY);
    } catch (Exception e) {
      LOG.error("Got an exception while placing the primary region server for table "
          + table, e);
    }
  }
  LOG.info("Finished generating the new assignment plan for the "
      + tableToRegionMap.keySet().size() + " tables");
  return plan;
}
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class RegionPlacementMaintainer method getRegionsMovement.
/**
 * Return how many regions will move per table since their primary RS will change.
 *
 * @param newPlan the new assignment plan
 * @return how many primaries will move per table
 */
public Map<TableName, Integer> getRegionsMovement(FavoredNodesPlan newPlan) throws IOException {
  Map<TableName, Integer> movesPerTable = new HashMap<>();
  SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
  Map<TableName, List<HRegionInfo>> tableToRegions = snapshot.getTableToRegionMap();
  FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan();
  Set<TableName> tables = snapshot.getTableSet();
  for (TableName table : tables) {
    int movedPrimaries = 0;
    if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) {
      continue;
    }
    List<HRegionInfo> regions = tableToRegions.get(table);
    for (HRegionInfo region : regions) {
      List<ServerName> oldServers = oldPlan.getFavoredNodes(region);
      List<ServerName> newServers = newPlan.getFavoredNodes(region);
      if (oldServers != null && newServers != null) {
        // The first favored node is the primary; count a move when it changes.
        ServerName oldPrimary = oldServers.get(0);
        ServerName newPrimary = newServers.get(0);
        if (oldPrimary.compareTo(newPrimary) != 0) {
          movedPrimaries++;
        }
      }
    }
    movesPerTable.put(table, movedPrimaries);
  }
  return movesPerTable;
}
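Continuing the sketch from getNewAssignmentPlan above (maintainer and newPlan are the assumed references from that example), the movement report can be printed per table:
// Hypothetical usage: report how many primary regions each table would move
// if the new plan were applied.
Map<TableName, Integer> moves = maintainer.getRegionsMovement(newPlan);
for (Map.Entry<TableName, Integer> entry : moves.entrySet()) {
  System.out.println("Table " + entry.getKey() + " would move "
      + entry.getValue() + " primary region(s)");
}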