Search in sources:

Example 1 with RegionServerInfo

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo in the Apache HBase project.

From the class RegionServerTracker, method getServerInfo.

/**
 * Reads the given region server's ephemeral znode from ZooKeeper and parses its payload
 * into a {@link RegionServerInfo} protobuf message.
 * @param serverName the region server whose znode should be read
 * @return the parsed info, or {@code null} if the znode is absent or its data is invalid
 * @throws KeeperException if the ZooKeeper read fails
 * @throws IOException if the thread is interrupted or the protobuf payload cannot be parsed
 */
private RegionServerInfo getServerInfo(ServerName serverName) throws KeeperException, IOException {
    String nodePath = watcher.getZNodePaths().getRsPath(serverName);
    byte[] data;
    try {
        data = ZKUtil.getData(watcher, nodePath);
    } catch (InterruptedException e) {
        // Restore the interrupt status before converting to an IOException so that callers
        // further up the stack can still observe that this thread was interrupted.
        Thread.currentThread().interrupt();
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
    if (data == null) {
        // we should receive a children changed event later and then we will expire it, so we still
        // need to add it to the region server set.
        LOG.warn("Server node {} does not exist, already dead?", serverName);
        return null;
    }
    if (data.length == 0 || !ProtobufUtil.isPBMagicPrefix(data)) {
        // this should not happen actually, unless we have bugs or someone has messed zk up.
        LOG.warn("Invalid data for region server node {} on zookeeper, data length = {}", serverName, data.length);
        return null;
    }
    // Strip the PB magic prefix and parse the remainder of the payload as RegionServerInfo.
    RegionServerInfo.Builder builder = RegionServerInfo.newBuilder();
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
    return builder.build();
}
Also used : InterruptedIOException(java.io.InterruptedIOException) RegionServerInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo)

Example 2 with RegionServerInfo

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo in the Apache HBase project.

From the class RegionServerTracker, method upgrade.

/**
 * Upgrade to active master mode, where besides tracking the changes of region server set, we will
 * also started to add new region servers to ServerManager and also schedule SCP if a region
 * server dies. Starts the tracking of online RegionServers. All RSes will be tracked after this
 * method is called.
 * <p/>
 * In this method, we will also construct the region server sets in {@link ServerManager}. If a
 * region server is dead between the crash of the previous master instance and the start of the
 * current master instance, we will schedule a SCP for it. This is done in
 * {@link ServerManager#findDeadServersAndProcess(Set, Set)}, we call it here under the lock
 * protection to prevent concurrency issues with server expiration operation.
 * @param deadServersFromPE the region servers which already have SCP associated.
 * @param liveServersFromWALDir the live region servers from wal directory.
 * @param splittingServersFromWALDir Servers whose WALs are being actively 'split'.
 * @throws KeeperException if reading server info from ZooKeeper fails
 * @throws IOException if parsing a server's znode payload fails
 */
public void upgrade(Set<ServerName> deadServersFromPE, Set<ServerName> liveServersFromWALDir, Set<ServerName> splittingServersFromWALDir) throws KeeperException, IOException {
    // Note the trailing space before "ServerCrashProcedures" — without it the concatenated
    // message reads "have existingServerCrashProcedures".
    LOG.info("Upgrading RegionServerTracker to active master mode; {} have existing " + "ServerCrashProcedures, {} possibly 'live' servers, and {} 'splitting'.", deadServersFromPE.size(), liveServersFromWALDir.size(), splittingServersFromWALDir.size());
    // deadServersFromPE is made from a list of outstanding ServerCrashProcedures.
    // splittingServersFromWALDir are being actively split -- the directory in the FS ends in
    // '-SPLITTING'. Each splitting server should have a corresponding SCP. Log if not.
    splittingServersFromWALDir.stream().filter(s -> !deadServersFromPE.contains(s)).forEach(s -> LOG.error("{} has no matching ServerCrashProcedure", s));
    // create ServerNode for all possible live servers from wal directory
    liveServersFromWALDir.forEach(sn -> server.getAssignmentManager().getRegionStates().getOrCreateServer(sn));
    ServerManager serverManager = server.getServerManager();
    synchronized (this) {
        Set<ServerName> liveServers = regionServers;
        for (ServerName serverName : liveServers) {
            // Prefer metrics built from the server's own znode info (includes version);
            // fall back to a bare ServerMetrics when the znode is missing or invalid.
            RegionServerInfo info = getServerInfo(serverName);
            ServerMetrics serverMetrics = info != null ? ServerMetricsBuilder.of(serverName, VersionInfoUtil.getVersionNumber(info.getVersionInfo()), info.getVersionInfo().getVersion()) : ServerMetricsBuilder.of(serverName);
            serverManager.checkAndRecordNewServer(serverName, serverMetrics);
        }
        // Must run under the same lock as the loop above to avoid racing with server expiration.
        serverManager.findDeadServersAndProcess(deadServersFromPE, liveServersFromWALDir);
        active = true;
    }
}
Also used : ZKListener(org.apache.hadoop.hbase.zookeeper.ZKListener) Logger(org.slf4j.Logger) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) KeeperException(org.apache.zookeeper.KeeperException) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) VersionInfoUtil(org.apache.hadoop.hbase.client.VersionInfoUtil) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Sets(org.apache.hbase.thirdparty.com.google.common.collect.Sets) IOException(java.io.IOException) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) InterruptedIOException(java.io.InterruptedIOException) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) CollectionUtils(org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils) List(java.util.List) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) RegionServerInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo) ServerMetricsBuilder(org.apache.hadoop.hbase.ServerMetricsBuilder) Collections(java.util.Collections) ExecutorService(java.util.concurrent.ExecutorService) ServerName(org.apache.hadoop.hbase.ServerName) RegionServerInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics)

Aggregations

InterruptedIOException (java.io.InterruptedIOException)2 RegionServerInfo (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo)2 IOException (java.io.IOException)1 Collections (java.util.Collections)1 List (java.util.List)1 Set (java.util.Set)1 ExecutorService (java.util.concurrent.ExecutorService)1 Executors (java.util.concurrent.Executors)1 Collectors (java.util.stream.Collectors)1 ServerMetrics (org.apache.hadoop.hbase.ServerMetrics)1 ServerMetricsBuilder (org.apache.hadoop.hbase.ServerMetricsBuilder)1 ServerName (org.apache.hadoop.hbase.ServerName)1 VersionInfoUtil (org.apache.hadoop.hbase.client.VersionInfoUtil)1 ProtobufUtil (org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil)1 ZKListener (org.apache.hadoop.hbase.zookeeper.ZKListener)1 ZKUtil (org.apache.hadoop.hbase.zookeeper.ZKUtil)1 ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher)1 Sets (org.apache.hbase.thirdparty.com.google.common.collect.Sets)1 ThreadFactoryBuilder (org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder)1 CollectionUtils (org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils)1