Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From the class ServerCrashProcedure, method verifyAndAssignMetaWithRetries.
/**
 * If hbase:meta is not assigned already, assign.
 * @throws IOException
 */
private void verifyAndAssignMetaWithRetries(final MasterProcedureEnv env) throws IOException {
  MasterServices services = env.getMasterServices();
  int iTimes = services.getConfiguration().getInt(KEY_RETRIES_ON_META, DEFAULT_RETRIES_ON_META);
  // Just reuse same time as we have for short wait on meta. Adding another config is overkill.
  long waitTime =
    services.getConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META);
  int iFlag = 0;
  while (true) {
    try {
      verifyAndAssignMeta(env);
      break;
    } catch (KeeperException e) {
      services.abort("In server shutdown processing, assigning meta", e);
      throw new IOException("Aborting", e);
    } catch (Exception e) {
      if (iFlag >= iTimes) {
        services.abort("verifyAndAssignMeta failed after " + iTimes + " retries, aborting", e);
        throw new IOException("Aborting", e);
      }
      try {
        Thread.sleep(waitTime);
      } catch (InterruptedException e1) {
        LOG.warn("Interrupted while sleeping between meta assignment retries", e1);
        Thread.currentThread().interrupt();
        throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
      }
      iFlag++;
    }
  }
}
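The loop above is a standard bounded-retry pattern: attempt the operation, swallow non-fatal exceptions, sleep a fixed interval, and give up once the attempt budget is exhausted, always preserving the interrupt flag. The standalone sketch below isolates that pattern without any HBase dependencies; the helper name retryWithFixedWait and the Callable task are made up for illustration.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

public final class RetrySketch {
  // Minimal sketch of the bounded-retry pattern used above; names are illustrative only.
  static <T> T retryWithFixedWait(Callable<T> task, int maxRetries, long waitMillis) throws IOException {
    int attempt = 0;
    while (true) {
      try {
        return task.call();
      } catch (Exception e) {
        if (attempt >= maxRetries) {
          throw new IOException("Giving up after " + maxRetries + " retries", e);
        }
        try {
          Thread.sleep(waitMillis);
        } catch (InterruptedException ie) {
          // Preserve the interrupt and surface it as an IOException, as the procedure does.
          Thread.currentThread().interrupt();
          throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
        }
        attempt++;
      }
    }
  }
}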
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From the class CoprocessorWhitelistMasterObserver, method verifyCoprocessors.
/**
 * Perform the validation checks for a coprocessor to determine if the path
 * is whitelisted or not.
 * @param ctx as passed in from the coprocessor
 * @param htd as passed in from the coprocessor
 * @throws IOException if the path is not included in the whitelist or a failure
 *         occurs during processing
 */
private void verifyCoprocessors(ObserverContext<MasterCoprocessorEnvironment> ctx, HTableDescriptor htd)
    throws IOException {
  MasterServices services = ctx.getEnvironment().getMasterServices();
  Configuration conf = services.getConfiguration();
  Collection<String> paths = conf.getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY);
  List<String> coprocs = htd.getCoprocessors();
  for (int i = 0; i < coprocs.size(); i++) {
    String coproc = coprocs.get(i);
    String coprocSpec = Bytes.toString(htd.getValue(Bytes.toBytes("coprocessor$" + (i + 1))));
    if (coprocSpec == null) {
      continue;
    }
    // File path is the 1st field of the coprocessor spec
    Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(coprocSpec);
    if (matcher == null || !matcher.matches()) {
      continue;
    }
    String coprocPathStr = matcher.group(1).trim();
    // Check if the coprocessor is being loaded via the classpath (i.e. no file path)
    if (coprocPathStr.equals("")) {
      break;
    }
    Path coprocPath = new Path(coprocPathStr);
    String coprocessorClass = matcher.group(2).trim();
    boolean foundPathMatch = false;
    for (String pathStr : paths) {
      Path wlPath = new Path(pathStr);
      try {
        foundPathMatch = validatePath(coprocPath, wlPath, conf);
        if (foundPathMatch) {
          LOG.debug(String.format("Coprocessor %s found in directory %s", coprocessorClass, pathStr));
          break;
        }
      } catch (IOException e) {
        LOG.warn(String.format("Failed to validate whitelist path %s for coprocessor path %s",
          pathStr, coprocPathStr), e);
      }
    }
    if (!foundPathMatch) {
      throw new IOException(String.format("Loading %s DENIED in %s",
        coprocessorClass, CP_COPROCESSOR_WHITELIST_PATHS_KEY));
    }
  }
}
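The path extraction above relies on HConstants.CP_HTD_ATTR_VALUE_PATTERN splitting the coprocessor attribute value into its path and class fields. The sketch below parses the same "path|class|priority|args" layout with a simplified regex of its own; the pattern and the sample spec string are illustrative approximations, not the real HConstants constant.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class CoprocSpecSketch {
  // Simplified stand-in for the HConstants pattern: path|class|priority|optional args.
  private static final Pattern SPEC =
      Pattern.compile("([^\\|]*)\\|([^\\|]+)\\|\\s*(\\d*)\\s*(\\|.*)?");

  public static void main(String[] args) {
    String spec = "hdfs:///user/hbase/coproc.jar|com.example.MyObserver|1001|k=v";
    Matcher m = SPEC.matcher(spec);
    if (m.matches()) {
      System.out.println("path  = " + m.group(1).trim());   // empty when loaded from the classpath
      System.out.println("class = " + m.group(2).trim());
    }
  }
}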
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From the class BackupLogCleaner, method init.
@Override
public void init(Map<String, Object> params) {
  if (params != null && params.containsKey(HMaster.MASTER)) {
    // Running inside the master: reuse its connection rather than opening a new one.
    MasterServices master = (MasterServices) params.get(HMaster.MASTER);
    conn = master.getConnection();
    if (getConf() == null) {
      super.setConf(conn.getConfiguration());
    }
  }
  if (conn == null) {
    // No master services were supplied, so fall back to a standalone connection.
    try {
      conn = ConnectionFactory.createConnection(getConf());
    } catch (IOException ioe) {
      throw new RuntimeException("Failed to create connection", ioe);
    }
  }
}
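When this cleaner runs inside the master it receives the MasterServices instance through the params map; otherwise it creates its own connection from the configuration. The snippet below shows one hedged way such a cleaner could be wired into a cluster configuration; the plugin property name follows the usual HBase log-cleaner chain convention and should be verified against the running version, and the cleaner class is referenced by name only to avoid a compile-time dependency.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CleanerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Append the backup cleaner to the master's log-cleaner plugin chain.
    // The property name is an assumption based on the standard HBase cleaner setup.
    String cleanerClass = "org.apache.hadoop.hbase.backup.master.BackupLogCleaner";
    String existing = conf.get("hbase.master.logcleaner.plugins", "");
    conf.set("hbase.master.logcleaner.plugins",
        existing.isEmpty() ? cleanerClass : existing + "," + cleanerClass);
  }
}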
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From the class EnableTableProcedure, method markRegionsOnline.
/**
 * Mark offline regions of the table online.
 * @param env MasterProcedureEnv
 * @param tableName the target table
 * @return whether the operation is fully completed or was interrupted.
 * @throws IOException
 */
private static boolean markRegionsOnline(final MasterProcedureEnv env, final TableName tableName)
    throws IOException {
  final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();
  final MasterServices masterServices = env.getMasterServices();
  final ServerManager serverManager = masterServices.getServerManager();
  boolean done = false;
  // Get the regions of this table. We're done when all of the listed
  // regions are onlined.
  List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations;
  if (TableName.META_TABLE_NAME.equals(tableName)) {
    tableRegionsAndLocations =
      new MetaTableLocator().getMetaRegionsAndLocations(masterServices.getZooKeeper());
  } else {
    tableRegionsAndLocations =
      MetaTableAccessor.getTableRegionsAndLocations(masterServices.getConnection(), tableName);
  }
  int countOfRegionsInTable = tableRegionsAndLocations.size();
  Map<HRegionInfo, ServerName> regionsToAssign =
    regionsToAssignWithServerName(env, tableRegionsAndLocations);
  // Need to potentially create some regions for the replicas.
  List<HRegionInfo> unrecordedReplicas = AssignmentManager.replicaRegionsNotRecordedInMeta(
    new HashSet<>(regionsToAssign.keySet()), masterServices);
  Map<ServerName, List<HRegionInfo>> srvToUnassignedRegs = assignmentManager.getBalancer()
    .roundRobinAssignment(unrecordedReplicas, serverManager.getOnlineServersList());
  if (srvToUnassignedRegs != null) {
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : srvToUnassignedRegs.entrySet()) {
      for (HRegionInfo h : entry.getValue()) {
        regionsToAssign.put(h, entry.getKey());
      }
    }
  }
  int offlineRegionsCount = regionsToAssign.size();
  LOG.info("Table '" + tableName + "' has " + countOfRegionsInTable + " regions, of which "
    + offlineRegionsCount + " are offline.");
  if (offlineRegionsCount == 0) {
    return true;
  }
  List<ServerName> onlineServers = serverManager.createDestinationServersList();
  Map<ServerName, List<HRegionInfo>> bulkPlan = env.getMasterServices().getAssignmentManager()
    .getBalancer().retainAssignment(regionsToAssign, onlineServers);
  if (bulkPlan != null) {
    LOG.info("Bulk assigning " + offlineRegionsCount + " region(s) across " + bulkPlan.size()
      + " server(s), retainAssignment=true");
    BulkAssigner ba = new GeneralBulkAssigner(masterServices, bulkPlan, assignmentManager, true);
    try {
      if (ba.bulkAssign()) {
        done = true;
      }
    } catch (InterruptedException e) {
      LOG.warn("Enable operation was interrupted when enabling table '" + tableName + "'");
      // Preserve the interrupt.
      Thread.currentThread().interrupt();
    }
  } else {
    LOG.info("Balancer was unable to find suitable servers for table " + tableName
      + ", leaving unassigned");
  }
  return done;
}
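A recurring step in the method above is converting between the two plan shapes: the balancer returns a server-to-regions map, while regionsToAssign is keyed region-to-server, so the replica plan has to be flattened into it. The generic helper below isolates that inversion in isolation; it is a standalone illustration with made-up names, not HBase code.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class PlanSketch {
  // Flatten a server -> regions plan into a region -> server map, mirroring the nested loop above.
  static <S, R> Map<R, S> invertPlan(Map<S, List<R>> plan) {
    Map<R, S> regionToServer = new HashMap<>();
    for (Map.Entry<S, List<R>> entry : plan.entrySet()) {
      for (R region : entry.getValue()) {
        regionToServer.put(region, entry.getKey());
      }
    }
    return regionToServer;
  }
}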
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From the class AbstractStateMachineTableProcedure, method preflightChecks.
/**
 * Check that the cluster is up and the master is running. Check that the table is modifiable.
 * If <code>enabled</code>, check the table is enabled, else check it is disabled.
 * Call this in the Procedure constructor so any exception can be passed to the caller.
 * @param enabled If true, check the table is enabled and throw an exception if not. If false,
 *        do the inverse. If null, skip the table checks.
 */
protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws HBaseIOException {
  MasterServices master = env.getMasterServices();
  if (!master.isClusterUp()) {
    throw new HBaseIOException("Cluster not up!");
  }
  if (master.isStopping() || master.isStopped()) {
    throw new HBaseIOException("Master stopping=" + master.isStopping()
      + ", stopped=" + master.isStopped());
  }
  if (enabled == null) {
    // Don't do any table checks.
    return;
  }
  try {
    // Checks that the table exists and is modifiable.
    checkTableModifiable(env);
    TableName tn = getTableName();
    TableStateManager tsm = master.getTableStateManager();
    TableState ts = tsm.getTableState(tn);
    if (enabled) {
      if (!ts.isEnabledOrEnabling()) {
        throw new TableNotEnabledException(tn);
      }
    } else {
      if (!ts.isDisabledOrDisabling()) {
        throw new TableNotDisabledException(tn);
      }
    }
  } catch (IOException ioe) {
    if (ioe instanceof HBaseIOException) {
      throw (HBaseIOException) ioe;
    }
    throw new HBaseIOException(ioe);
  }
}
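Because preflightChecks is meant to run in a procedure constructor, a subclass would typically invoke it right after initializing its own state so that any HBaseIOException propagates straight to the caller that created the procedure. The dependency-free sketch below illustrates that "fail fast in the constructor" shape; the classes, method signature, and checks are simplified stand-ins for the real procedure hierarchy, not HBase code.

import java.io.IOException;

// Standalone sketch of the pattern used by preflightChecks; names are illustrative assumptions.
class SketchBaseProcedure {
  protected void preflightChecks(boolean clusterUp, Boolean enabled) throws IOException {
    if (!clusterUp) {
      throw new IOException("Cluster not up!");
    }
    // enabled == null would mean: skip the table-state check entirely.
  }
}

class SketchTableProcedure extends SketchBaseProcedure {
  SketchTableProcedure(boolean clusterUp) throws IOException {
    // Run the checks in the constructor so the caller sees the exception immediately.
    preflightChecks(clusterUp, Boolean.TRUE);
  }
}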