
Example 1 with MasterServices

use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.

the class ServerCrashProcedure method verifyAndAssignMetaWithRetries.

/**
   * If hbase:meta is not assigned already, assign.
   * @throws IOException
   */
private void verifyAndAssignMetaWithRetries(final MasterProcedureEnv env) throws IOException {
    MasterServices services = env.getMasterServices();
    int iTimes = services.getConfiguration().getInt(KEY_RETRIES_ON_META, DEFAULT_RETRIES_ON_META);
    // Just reuse same time as we have for short wait on meta. Adding another config is overkill.
    long waitTime = services.getConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META);
    int iFlag = 0;
    while (true) {
        try {
            verifyAndAssignMeta(env);
            break;
        } catch (KeeperException e) {
            services.abort("In server shutdown processing, assigning meta", e);
            throw new IOException("Aborting", e);
        } catch (Exception e) {
            if (iFlag >= iTimes) {
                services.abort("verifyAndAssignMeta failed after" + iTimes + " retries, aborting", e);
                throw new IOException("Aborting", e);
            }
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e1) {
                LOG.warn("Interrupted when is the thread sleep", e1);
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
            }
            iFlag++;
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) MasterServices(org.apache.hadoop.hbase.master.MasterServices) IOException(java.io.IOException) KeeperException(org.apache.zookeeper.KeeperException) ProcedureYieldException(org.apache.hadoop.hbase.procedure2.ProcedureYieldException)
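
The method above is a generic bounded retry-with-wait: attempt the operation, tolerate failures up to a configured count, sleep between attempts, and restore the interrupt flag (converting to InterruptedIOException) if the sleep itself is interrupted. Below is a minimal, self-contained sketch of that pattern for illustration only; the Attempt interface and runWithRetries helper are hypothetical and not part of HBase.

import java.io.IOException;
import java.io.InterruptedIOException;

public class BoundedRetrySketch {

    /** Illustrative callback standing in for verifyAndAssignMeta(env). */
    public interface Attempt {
        void run() throws Exception;
    }

    /**
     * Run the attempt until it succeeds, tolerating up to maxRetries failures and
     * sleeping waitMillis between them, mirroring verifyAndAssignMetaWithRetries above.
     */
    public static void runWithRetries(Attempt attempt, int maxRetries, long waitMillis) throws IOException {
        int failures = 0;
        while (true) {
            try {
                attempt.run();
                return;
            } catch (Exception e) {
                if (failures >= maxRetries) {
                    throw new IOException("Giving up after " + maxRetries + " retries", e);
                }
                try {
                    Thread.sleep(waitMillis);
                } catch (InterruptedException ie) {
                    // Restore the interrupt flag and surface an IO-flavored interruption.
                    Thread.currentThread().interrupt();
                    throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
                }
                failures++;
            }
        }
    }
}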

Example 2 with MasterServices

use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.

the class CoprocessorWhitelistMasterObserver method verifyCoprocessors.

/**
   * Perform the validation checks for a coprocessor to determine if the path
   * is white listed or not.
   * @param  ctx         as passed in from the coprocessor
   * @param  htd         as passed in from the coprocessor
   * @throws IOException if the path is not included in the whitelist or a
   *                     failure occurs during processing
   */
private void verifyCoprocessors(ObserverContext<MasterCoprocessorEnvironment> ctx, HTableDescriptor htd) throws IOException {
    MasterServices services = ctx.getEnvironment().getMasterServices();
    Configuration conf = services.getConfiguration();
    Collection<String> paths = conf.getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY);
    List<String> coprocs = htd.getCoprocessors();
    for (int i = 0; i < coprocs.size(); i++) {
        String coproc = coprocs.get(i);
        String coprocSpec = Bytes.toString(htd.getValue(Bytes.toBytes("coprocessor$" + (i + 1))));
        if (coprocSpec == null) {
            continue;
        }
        // File path is the 1st field of the coprocessor spec
        Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(coprocSpec);
        if (!matcher.matches()) {
            continue;
        }
        String coprocPathStr = matcher.group(1).trim();
        // Check if coprocessor is being loaded via the classpath (i.e. no file path)
        if (coprocPathStr.equals("")) {
            break;
        }
        Path coprocPath = new Path(coprocPathStr);
        String coprocessorClass = matcher.group(2).trim();
        boolean foundPathMatch = false;
        for (String pathStr : paths) {
            Path wlPath = new Path(pathStr);
            try {
                foundPathMatch = validatePath(coprocPath, wlPath, conf);
                if (foundPathMatch) {
                    LOG.debug(String.format("Coprocessor %s found in directory %s", coprocessorClass, pathStr));
                    break;
                }
            } catch (IOException e) {
                LOG.warn(String.format("Failed to validate white list path %s for coprocessor path %s", pathStr, coprocPathStr));
            }
        }
        if (!foundPathMatch) {
            throw new IOException(String.format("Loading %s DENIED in %s", coprocessorClass, CP_COPROCESSOR_WHITELIST_PATHS_KEY));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) Matcher(java.util.regex.Matcher) PathMatcher(java.nio.file.PathMatcher) MasterServices(org.apache.hadoop.hbase.master.MasterServices) IOException(java.io.IOException)
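
The whitelist itself is a comma-separated Hadoop configuration property; getStringCollection splits it into the individual path strings that validatePath is run against. The following is a minimal sketch of setting and reading such a property; the property name used here is a placeholder, not the actual value of CP_COPROCESSOR_WHITELIST_PATHS_KEY.

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class WhitelistConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Placeholder key; the observer above reads CP_COPROCESSOR_WHITELIST_PATHS_KEY.
        String key = "example.coprocessor.whitelist.paths";
        conf.set(key, "hdfs:///hbase/coprocessors,file:///opt/hbase/cp");
        // getStringCollection splits the comma-separated value into individual entries.
        Collection<String> paths = conf.getStringCollection(key);
        for (String pathStr : paths) {
            // Each entry becomes a Path that candidate coprocessor jar locations
            // would be checked against (validatePath in the observer above).
            System.out.println(new Path(pathStr));
        }
    }
}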

Example 3 with MasterServices

use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.

the class BackupLogCleaner method init.

@Override
public void init(Map<String, Object> params) {
    if (params != null && params.containsKey(HMaster.MASTER)) {
        MasterServices master = (MasterServices) params.get(HMaster.MASTER);
        conn = master.getConnection();
        if (getConf() == null) {
            super.setConf(conn.getConfiguration());
        }
    }
    if (conn == null) {
        try {
            conn = ConnectionFactory.createConnection(getConf());
        } catch (IOException ioe) {
            throw new RuntimeException("Failed to create connection", ioe);
        }
    }
}
Also used : MasterServices(org.apache.hadoop.hbase.master.MasterServices) IOException(java.io.IOException)
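
Because init takes a plain Map, the cleaner can be wired up outside a running master by handing it a MasterServices instance under the HMaster.MASTER key. A minimal sketch, assuming a MasterServices and a configured BackupLogCleaner are already available; the BackupLogCleaner package in the import is assumed, not taken from the snippet above.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;  // package assumed
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterServices;

public class BackupLogCleanerWiringSketch {
    /**
     * Pass the running master to the cleaner the way init() above expects it:
     * under the HMaster.MASTER key. With the master present, the cleaner reuses
     * master.getConnection() instead of creating its own connection.
     */
    static void initWithMaster(BackupLogCleaner cleaner, MasterServices master) {
        Map<String, Object> params = new HashMap<>();
        params.put(HMaster.MASTER, master);
        cleaner.init(params);
    }
}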

Example 4 with MasterServices

use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.

the class EnableTableProcedure method markRegionsOnline.

/**
   * Mark offline regions of the table online
   * @param env MasterProcedureEnv
   * @param tableName the target table
   * @return true if the operation completed fully; false if it was interrupted or assignment could not complete.
   * @throws IOException
   */
private static boolean markRegionsOnline(final MasterProcedureEnv env, final TableName tableName) throws IOException {
    final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();
    final MasterServices masterServices = env.getMasterServices();
    final ServerManager serverManager = masterServices.getServerManager();
    boolean done = false;
    // Get the regions of this table. We're done when all listed
    // regions are onlined.
    List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations;
    if (TableName.META_TABLE_NAME.equals(tableName)) {
        tableRegionsAndLocations = new MetaTableLocator().getMetaRegionsAndLocations(masterServices.getZooKeeper());
    } else {
        tableRegionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(masterServices.getConnection(), tableName);
    }
    int countOfRegionsInTable = tableRegionsAndLocations.size();
    Map<HRegionInfo, ServerName> regionsToAssign = regionsToAssignWithServerName(env, tableRegionsAndLocations);
    // need to potentially create some regions for the replicas
    List<HRegionInfo> unrecordedReplicas = AssignmentManager.replicaRegionsNotRecordedInMeta(new HashSet<>(regionsToAssign.keySet()), masterServices);
    Map<ServerName, List<HRegionInfo>> srvToUnassignedRegs = assignmentManager.getBalancer().roundRobinAssignment(unrecordedReplicas, serverManager.getOnlineServersList());
    if (srvToUnassignedRegs != null) {
        for (Map.Entry<ServerName, List<HRegionInfo>> entry : srvToUnassignedRegs.entrySet()) {
            for (HRegionInfo h : entry.getValue()) {
                regionsToAssign.put(h, entry.getKey());
            }
        }
    }
    int offlineRegionsCount = regionsToAssign.size();
    LOG.info("Table '" + tableName + "' has " + countOfRegionsInTable + " regions, of which " + offlineRegionsCount + " are offline.");
    if (offlineRegionsCount == 0) {
        return true;
    }
    List<ServerName> onlineServers = serverManager.createDestinationServersList();
    Map<ServerName, List<HRegionInfo>> bulkPlan = env.getMasterServices().getAssignmentManager().getBalancer().retainAssignment(regionsToAssign, onlineServers);
    if (bulkPlan != null) {
        LOG.info("Bulk assigning " + offlineRegionsCount + " region(s) across " + bulkPlan.size() + " server(s), retainAssignment=true");
        BulkAssigner ba = new GeneralBulkAssigner(masterServices, bulkPlan, assignmentManager, true);
        try {
            if (ba.bulkAssign()) {
                done = true;
            }
        } catch (InterruptedException e) {
            LOG.warn("Enable operation was interrupted when enabling table '" + tableName + "'");
            // Preserve the interrupt.
            Thread.currentThread().interrupt();
        }
    } else {
        LOG.info("Balancer was unable to find suitable servers for table " + tableName + ", leaving unassigned");
    }
    return done;
}
Also used : ServerManager(org.apache.hadoop.hbase.master.ServerManager) GeneralBulkAssigner(org.apache.hadoop.hbase.master.GeneralBulkAssigner) AssignmentManager(org.apache.hadoop.hbase.master.AssignmentManager) MasterServices(org.apache.hadoop.hbase.master.MasterServices) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) MetaTableLocator(org.apache.hadoop.hbase.zookeeper.MetaTableLocator) ServerName(org.apache.hadoop.hbase.ServerName) BulkAssigner(org.apache.hadoop.hbase.master.BulkAssigner) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) Pair(org.apache.hadoop.hbase.util.Pair)
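
Both balancer calls above (roundRobinAssignment and retainAssignment) return a plan keyed by server, while the procedure tracks work keyed by region, so the inner loop simply inverts that map. A minimal, illustrative helper capturing that step (not part of HBase):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;

public class AssignmentPlanSketch {
    /** Flatten a server -> regions plan into the region -> server form used by regionsToAssign. */
    static Map<HRegionInfo, ServerName> invertPlan(Map<ServerName, List<HRegionInfo>> plan) {
        Map<HRegionInfo, ServerName> regionToServer = new HashMap<>();
        if (plan == null) {
            return regionToServer;
        }
        for (Map.Entry<ServerName, List<HRegionInfo>> entry : plan.entrySet()) {
            for (HRegionInfo region : entry.getValue()) {
                regionToServer.put(region, entry.getKey());
            }
        }
        return regionToServer;
    }
}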

Example 5 with MasterServices

use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.

the class AbstractStateMachineTableProcedure method preflightChecks.

/**
 * Check that cluster is up and master is running. Check table is modifiable.
 * If <code>enabled</code>, check the table is enabled; otherwise check that it is disabled.
 * Call from the Procedure constructor so any exception can be passed to the caller.
 * @param enabled If true, check table is enabled and throw exception if not. If false, do the
 *                inverse. If null, do no table checks.
 */
protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws HBaseIOException {
    MasterServices master = env.getMasterServices();
    if (!master.isClusterUp()) {
        throw new HBaseIOException("Cluster not up!");
    }
    if (master.isStopping() || master.isStopped()) {
        throw new HBaseIOException("Master stopping=" + master.isStopping() + ", stopped=" + master.isStopped());
    }
    if (enabled == null) {
        // Don't do any table checks.
        return;
    }
    try {
        // Checks table exists and is modifiable.
        checkTableModifiable(env);
        TableName tn = getTableName();
        TableStateManager tsm = master.getTableStateManager();
        TableState ts = tsm.getTableState(tn);
        if (enabled) {
            if (!ts.isEnabledOrEnabling()) {
                throw new TableNotEnabledException(tn);
            }
        } else {
            if (!ts.isDisabledOrDisabling()) {
                throw new TableNotDisabledException(tn);
            }
        }
    } catch (IOException ioe) {
        if (ioe instanceof HBaseIOException) {
            throw (HBaseIOException) ioe;
        }
        throw new HBaseIOException(ioe);
    }
}
Also used : TableNotDisabledException(org.apache.hadoop.hbase.TableNotDisabledException) TableName(org.apache.hadoop.hbase.TableName) TableStateManager(org.apache.hadoop.hbase.master.TableStateManager) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) MasterServices(org.apache.hadoop.hbase.master.MasterServices) IOException(java.io.IOException) TableState(org.apache.hadoop.hbase.client.TableState) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException)
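
The table-state half of the check can be reproduced anywhere a MasterServices handle is available. A minimal sketch of the "must be enabled" case, mirroring preflightChecks(env, true); the assertEnabled helper is illustrative and not part of HBase.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.TableStateManager;

public class TableStateCheckSketch {
    /** Throw if the table is neither enabled nor enabling, as preflightChecks(env, true) does. */
    static void assertEnabled(MasterServices master, TableName tableName) throws IOException {
        TableStateManager tsm = master.getTableStateManager();
        TableState ts = tsm.getTableState(tableName);
        if (!ts.isEnabledOrEnabling()) {
            throw new TableNotEnabledException(tableName);
        }
    }
}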

Aggregations

MasterServices (org.apache.hadoop.hbase.master.MasterServices) 24
IOException (java.io.IOException) 9
TableName (org.apache.hadoop.hbase.TableName) 8
Test (org.junit.Test) 8
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 7
Map (java.util.Map) 5
ServerName (org.apache.hadoop.hbase.ServerName) 5
List (java.util.List) 4
Configuration (org.apache.hadoop.conf.Configuration) 4
Path (org.apache.hadoop.fs.Path) 4
HashMap (java.util.HashMap) 3
TableDescriptors (org.apache.hadoop.hbase.TableDescriptors) 3
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 3
ArrayList (java.util.ArrayList) 2
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 2
LogEntry (org.apache.hadoop.hbase.client.LogEntry) 2
AssignmentManager (org.apache.hadoop.hbase.master.AssignmentManager) 2
MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem) 2
ServerManager (org.apache.hadoop.hbase.master.ServerManager) 2
AssignmentManager (org.apache.hadoop.hbase.master.assignment.AssignmentManager) 2