
Example 11 with ReplicationPeerDescription

Use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

In class DumpReplicationQueues, method dumpReplicationQueues.

private int dumpReplicationQueues(DumpOptions opts) throws Exception {
    Configuration conf = getConf();
    Connection connection = ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin();
    ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + EnvironmentEdgeManager.currentTime(), new WarnOnlyAbortable(), true);
    try {
        // Our zk watcher
        LOG.info("Our Quorum: " + zkw.getQuorum());
        List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
        if (replicatedTableCFs.isEmpty()) {
            LOG.info("No tables with a configured replication peer were found.");
            return (0);
        } else {
            LOG.info("Replicated Tables: " + replicatedTableCFs);
        }
        List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
        if (peers.isEmpty()) {
            LOG.info("Replication is enabled but no peer configuration was found.");
        }
        System.out.println("Dumping replication peers and configurations:");
        System.out.println(dumpPeersState(peers));
        if (opts.isDistributed()) {
            LOG.info("Found [--distributed], will poll each RegionServer.");
            Set<String> peerIds = peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet());
            System.out.println(dumpQueues(zkw, peerIds, opts.isHdfs()));
            System.out.println(dumpReplicationSummary());
        } else {
            // use ZK instead
            System.out.print("Dumping replication znodes via ZooKeeper:");
            System.out.println(ZKDump.getReplicationZnodesDump(zkw));
        }
        return (0);
    } catch (IOException e) {
        // Log the failure instead of swallowing it before returning a non-zero exit code.
        LOG.error("Dumping replication queues failed", e);
        return (-1);
    } finally {
        // Close the ZooKeeper watcher plus the admin and connection handles.
        zkw.close();
        admin.close();
        connection.close();
    }
}
Also used : StringUtils(org.apache.hadoop.hbase.procedure2.util.StringUtils) Arrays(java.util.Arrays) FileSystem(org.apache.hadoop.fs.FileSystem) ReplicationStorageFactory(org.apache.hadoop.hbase.replication.ReplicationStorageFactory) LoggerFactory(org.slf4j.LoggerFactory) FileStatus(org.apache.hadoop.fs.FileStatus) ReplicationQueueStorage(org.apache.hadoop.hbase.replication.ReplicationQueueStorage) ArrayList(java.util.ArrayList) WALLink(org.apache.hadoop.hbase.io.WALLink) ReplicationQueueInfo(org.apache.hadoop.hbase.replication.ReplicationQueueInfo) AtomicLongMap(org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongMap) Configured(org.apache.hadoop.conf.Configured) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) TableCFs(org.apache.hadoop.hbase.client.replication.TableCFs) LinkedList(java.util.LinkedList) ServerName(org.apache.hadoop.hbase.ServerName) Logger(org.slf4j.Logger) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) Abortable(org.apache.hadoop.hbase.Abortable) ToolRunner(org.apache.hadoop.util.ToolRunner) Set(java.util.Set) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) IOException(java.io.IOException) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) ConnectionFactory(org.apache.hadoop.hbase.client.ConnectionFactory) Tool(org.apache.hadoop.util.Tool) List(java.util.List) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Admin(org.apache.hadoop.hbase.client.Admin) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Connection(org.apache.hadoop.hbase.client.Connection) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) Queue(java.util.Queue) Collections(java.util.Collections) ZKDump(org.apache.hadoop.hbase.zookeeper.ZKDump)
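
For orientation, here is a minimal self-contained sketch of the same entry point the tool uses, Admin.listReplicationPeers(). The class name and printed format are made up for illustration, and try-with-resources stands in for the manual lifetime management in the tool above.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListPeersSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // try-with-resources closes both the connection and the admin handle
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
            for (ReplicationPeerDescription peer : peers) {
                System.out.println(peer.getPeerId() + " enabled=" + peer.isEnabled()
                    + " clusterKey=" + peer.getPeerConfig().getClusterKey());
            }
        }
    }
}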

Example 12 with ReplicationPeerDescription

Use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

In class HBaseFsck, method cleanReplicationBarrier.

public void cleanReplicationBarrier() throws IOException {
    if (!cleanReplicationBarrier || cleanReplicationBarrierTable == null) {
        return;
    }
    if (cleanReplicationBarrierTable.isSystemTable()) {
        errors.reportError(ERROR_CODE.INVALID_TABLE, "invalid table: " + cleanReplicationBarrierTable);
        return;
    }
    boolean isGlobalScope = false;
    try {
        isGlobalScope = admin.getDescriptor(cleanReplicationBarrierTable).hasGlobalReplicationScope();
    } catch (TableNotFoundException e) {
        LOG.info("we may need to clean some erroneous data due to bugs");
    }
    if (isGlobalScope) {
        errors.reportError(ERROR_CODE.INVALID_TABLE, "table's replication scope is global: " + cleanReplicationBarrierTable);
        return;
    }
    List<byte[]> regionNames = new ArrayList<>();
    Scan barrierScan = new Scan();
    barrierScan.setCaching(100);
    barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
    barrierScan.withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable, ClientMetaTableAccessor.QueryType.REGION)).withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable, ClientMetaTableAccessor.QueryType.REGION));
    Result result;
    try (ResultScanner scanner = meta.getScanner(barrierScan)) {
        while ((result = scanner.next()) != null) {
            regionNames.add(result.getRow());
        }
    }
    if (regionNames.isEmpty()) {
        errors.reportError(ERROR_CODE.INVALID_TABLE, "there are no replication barriers for this table: " + cleanReplicationBarrierTable);
        return;
    }
    ReplicationQueueStorage queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf());
    List<ReplicationPeerDescription> peerDescriptions = admin.listReplicationPeers();
    if (peerDescriptions != null && !peerDescriptions.isEmpty()) {
        List<String> peers = peerDescriptions.stream().filter(peer -> peer.getPeerConfig().needToReplicate(cleanReplicationBarrierTable)).map(peer -> peer.getPeerId()).collect(Collectors.toList());
        try {
            List<String> batch = new ArrayList<>();
            for (String peer : peers) {
                for (byte[] regionName : regionNames) {
                    batch.add(RegionInfo.encodeRegionName(regionName));
                    if (batch.size() % 100 == 0) {
                        queueStorage.removeLastSequenceIds(peer, batch);
                        batch.clear();
                    }
                }
                if (!batch.isEmpty()) {
                    queueStorage.removeLastSequenceIds(peer, batch);
                    batch.clear();
                }
            }
        } catch (ReplicationException re) {
            throw new IOException(re);
        }
    }
    for (byte[] regionName : regionNames) {
        meta.delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY));
    }
    setShouldRerun();
}
Also used : UserProvider(org.apache.hadoop.hbase.security.UserProvider) FileSystem(org.apache.hadoop.fs.FileSystem) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) WALSplitUtil(org.apache.hadoop.hbase.wal.WALSplitUtil) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) FileStatus(org.apache.hadoop.fs.FileStatus) StringUtils(org.apache.commons.lang3.StringUtils) RegionState(org.apache.hadoop.hbase.master.RegionState) InetAddress(java.net.InetAddress) Future(java.util.concurrent.Future) Vector(java.util.Vector) Delete(org.apache.hadoop.hbase.client.Delete) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) ZNodePaths(org.apache.hadoop.hbase.zookeeper.ZNodePaths) AlreadyBeingCreatedException(org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException) Joiner(org.apache.hbase.thirdparty.com.google.common.base.Joiner) Closeables(org.apache.hbase.thirdparty.com.google.common.io.Closeables) EnumSet(java.util.EnumSet) Cell(org.apache.hadoop.hbase.Cell) PrintWriter(java.io.PrintWriter) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) Get(org.apache.hadoop.hbase.client.Get) Set(java.util.Set) Executors(java.util.concurrent.Executors) IOUtils(org.apache.commons.io.IOUtils) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) ReplicationStorageFactory(org.apache.hadoop.hbase.replication.ReplicationStorageFactory) FutureTask(java.util.concurrent.FutureTask) Callable(java.util.concurrent.Callable) InterruptedIOException(java.io.InterruptedIOException) FsAction(org.apache.hadoop.fs.permission.FsAction) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) StoreFileInfo(org.apache.hadoop.hbase.regionserver.StoreFileInfo) ZooKeeperConnectionException(org.apache.hadoop.hbase.ZooKeeperConnectionException) ServerName(org.apache.hadoop.hbase.ServerName) Option(org.apache.hadoop.hbase.ClusterMetrics.Option) TableName(org.apache.hadoop.hbase.TableName) StringWriter(java.io.StringWriter) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Sets(org.apache.hbase.thirdparty.com.google.common.collect.Sets) IOException(java.io.IOException) ConnectionFactory(org.apache.hadoop.hbase.client.ConnectionFactory) Scan(org.apache.hadoop.hbase.client.Scan) ExecutionException(java.util.concurrent.ExecutionException) TreeMap(java.util.TreeMap) Admin(org.apache.hadoop.hbase.client.Admin) Connection(org.apache.hadoop.hbase.client.Connection) ReflectionUtils(org.apache.hadoop.util.ReflectionUtils) CatalogFamilyFormat(org.apache.hadoop.hbase.CatalogFamilyFormat) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) Result(org.apache.hadoop.hbase.client.Result) RegionLocations(org.apache.hadoop.hbase.RegionLocations) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) HFileLink(org.apache.hadoop.hbase.io.HFileLink) ReplicationQueueStorage(org.apache.hadoop.hbase.replication.ReplicationQueueStorage) FsPermission(org.apache.hadoop.fs.permission.FsPermission) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Locale(java.util.Locale) Configured(org.apache.hadoop.conf.Configured) TableIntegrityErrorHandler(org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler) 
Path(org.apache.hadoop.fs.Path) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) URI(java.net.URI) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) HFile(org.apache.hadoop.hbase.io.hfile.HFile) ByteArrayComparator(org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator) Collection(java.util.Collection) Abortable(org.apache.hadoop.hbase.Abortable) ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) HBaseInterfaceAudience(org.apache.hadoop.hbase.HBaseInterfaceAudience) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) Tool(org.apache.hadoop.util.Tool) Objects(java.util.Objects) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) List(java.util.List) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) FileLink(org.apache.hadoop.hbase.io.FileLink) Entry(java.util.Map.Entry) Optional(java.util.Optional) RowMutations(org.apache.hadoop.hbase.client.RowMutations) MetaTableAccessor(org.apache.hadoop.hbase.MetaTableAccessor) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) SortedMap(java.util.SortedMap) MasterNotRunningException(org.apache.hadoop.hbase.MasterNotRunningException) Preconditions(org.apache.hbase.thirdparty.com.google.common.base.Preconditions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) TableState(org.apache.hadoop.hbase.client.TableState) HashSet(java.util.HashSet) InterfaceStability(org.apache.yetus.audience.InterfaceStability) HConstants(org.apache.hadoop.hbase.HConstants) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) HFileCorruptionChecker(org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker) ReplicationChecker(org.apache.hadoop.hbase.util.hbck.ReplicationChecker) ExecutorService(java.util.concurrent.ExecutorService) KeyValue(org.apache.hadoop.hbase.KeyValue) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) KeeperException(org.apache.zookeeper.KeeperException) Put(org.apache.hadoop.hbase.client.Put) ToolRunner(org.apache.hadoop.util.ToolRunner) RegionReplicaUtil(org.apache.hadoop.hbase.client.RegionReplicaUtil) RemoteException(org.apache.hadoop.ipc.RemoteException) Lists(org.apache.hbase.thirdparty.com.google.common.collect.Lists) ClientMetaTableAccessor(org.apache.hadoop.hbase.ClientMetaTableAccessor) TimeUnit(java.util.concurrent.TimeUnit) CellUtil(org.apache.hadoop.hbase.CellUtil) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) ERROR_CODE(org.apache.hadoop.hbase.util.HbckErrorReporter.ERROR_CODE) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) AccessControlException(org.apache.hadoop.security.AccessControlException) Closeable(java.io.Closeable) Table(org.apache.hadoop.hbase.client.Table) Comparator(java.util.Comparator) Collections(java.util.Collections) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
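
The loop above flushes removeLastSequenceIds in chunks of 100 and then drains the remainder. Below is a standalone sketch of that flush-every-N pattern, with a plain Consumer standing in for the queue-storage call; the helper class and its names are hypothetical.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class BatchFlushSketch {
    static final int BATCH_SIZE = 100; // same chunk size as cleanReplicationBarrier

    // Feed items to 'flush' in chunks of BATCH_SIZE, then flush any remainder.
    // Note: the same list instance is reused and cleared between flushes.
    static <T> void flushInBatches(Iterable<T> items, Consumer<List<T>> flush) {
        List<T> batch = new ArrayList<>();
        for (T item : items) {
            batch.add(item);
            if (batch.size() == BATCH_SIZE) {
                flush.accept(batch);
                batch.clear();
            }
        }
        if (!batch.isEmpty()) {
            flush.accept(batch); // trailing partial batch
        }
    }

    public static void main(String[] args) {
        List<Integer> data = new ArrayList<>();
        for (int i = 0; i < 250; i++) {
            data.add(i);
        }
        // prints "flushing 100" twice, then "flushing 50"
        flushInBatches(data, chunk -> System.out.println("flushing " + chunk.size()));
    }
}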

Example 13 with ReplicationPeerDescription

Use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

In class ReplicationMetaCleaner, method chore.

@Override
protected void chore() {
    try {
        Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
        Map<String, Set<String>> serialTables = new HashMap<>();
        for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) {
            boolean hasSerialScope = false;
            for (HColumnDescriptor column : entry.getValue().getFamilies()) {
                if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
                    hasSerialScope = true;
                    break;
                }
            }
            if (hasSerialScope) {
                serialTables.put(entry.getValue().getTableName().getNameAsString(), new HashSet<>());
            }
        }
        if (serialTables.isEmpty()) {
            return;
        }
        List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
        for (ReplicationPeerDescription peerDesc : peers) {
            for (Map.Entry<TableName, List<String>> map : peerDesc.getPeerConfig().getTableCFsMap().entrySet()) {
                if (serialTables.containsKey(map.getKey().getNameAsString())) {
                    serialTables.get(map.getKey().getNameAsString()).add(peerDesc.getPeerId());
                    break;
                }
            }
        }
        Map<String, List<Long>> barrierMap = MetaTableAccessor.getAllBarriers(master.getConnection());
        for (Map.Entry<String, List<Long>> entry : barrierMap.entrySet()) {
            String encodedName = entry.getKey();
            byte[] encodedBytes = Bytes.toBytes(encodedName);
            boolean canClearRegion = false;
            Map<String, Long> posMap = MetaTableAccessor.getReplicationPositionForAllPeer(master.getConnection(), encodedBytes);
            if (posMap.isEmpty()) {
                continue;
            }
            String tableName = MetaTableAccessor.getSerialReplicationTableName(master.getConnection(), encodedBytes);
            Set<String> confPeers = serialTables.get(tableName);
            if (confPeers == null) {
                // This table doesn't exist, or none of its column families has serial scope any more, so we can clear meta.
                canClearRegion = true;
            } else {
                if (!allPeersHavePosition(confPeers, posMap)) {
                    continue;
                }
                String daughterValue = MetaTableAccessor.getSerialReplicationDaughterRegion(master.getConnection(), encodedBytes);
                if (daughterValue != null) {
                    // this region was merged or split
                    boolean allDaughterStart = true;
                    String[] daughterRegions = daughterValue.split(",");
                    for (String daughter : daughterRegions) {
                        byte[] region = Bytes.toBytes(daughter);
                        if (!MetaTableAccessor.getReplicationBarriers(master.getConnection(), region).isEmpty() && !allPeersHavePosition(confPeers, MetaTableAccessor.getReplicationPositionForAllPeer(master.getConnection(), region))) {
                            allDaughterStart = false;
                            break;
                        }
                    }
                    if (allDaughterStart) {
                        canClearRegion = true;
                    }
                }
            }
            if (canClearRegion) {
                Delete delete = new Delete(encodedBytes);
                delete.addFamily(HConstants.REPLICATION_POSITION_FAMILY);
                delete.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
                delete.addFamily(HConstants.REPLICATION_META_FAMILY);
                try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
                    metaTable.delete(delete);
                }
            } else {
                // Barriers whose seq is larger than min pos of all peers, and the last barrier whose seq
                // is smaller than min pos should be kept. All other barriers can be deleted.
                long minPos = Long.MAX_VALUE;
                for (Map.Entry<String, Long> pos : posMap.entrySet()) {
                    minPos = Math.min(minPos, pos.getValue());
                }
                List<Long> barriers = entry.getValue();
                int index = Collections.binarySearch(barriers, minPos);
                if (index < 0) {
                    index = -index - 1;
                }
                Delete delete = new Delete(encodedBytes);
                for (int i = 0; i < index - 1; i++) {
                    delete.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, Bytes.toBytes(barriers.get(i)));
                }
                try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
                    metaTable.delete(delete);
                }
            }
        }
    } catch (IOException e) {
        LOG.error("Exception during cleaning up.", e);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) List(java.util.List) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) Map(java.util.Map)
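
The barrier-trimming branch leans on the Collections.binarySearch contract: when the key is absent, the method returns (-(insertion point) - 1), so index = -index - 1 recovers the slot where minPos would be inserted. A small illustration with made-up barrier values:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BinarySearchSketch {
    public static void main(String[] args) {
        List<Long> barriers = Arrays.asList(10L, 20L, 30L, 40L);
        long minPos = 25L; // not present in the sorted list
        int index = Collections.binarySearch(barriers, minPos);
        if (index < 0) {
            index = -index - 1; // binarySearch returned -3, so index becomes 2
        }
        // As in the chore: barriers before index - 1 can be deleted, keeping the
        // last barrier below minPos (20) and everything above it (30, 40).
        System.out.println("insertion point = " + index); // 2
        System.out.println("deletable = " + barriers.subList(0, Math.max(0, index - 1))); // [10]
    }
}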

Example 14 with ReplicationPeerDescription

Use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

In class ReplicationAdmin, method listPeerConfigs.

/**
   * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicationPeers()} instead
   */
@Deprecated
public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
    List<ReplicationPeerDescription> peers = this.admin.listReplicationPeers();
    Map<String, ReplicationPeerConfig> result = new TreeMap<>();
    for (ReplicationPeerDescription peer : peers) {
        result.put(peer.getPeerId(), peer.getPeerConfig());
    }
    return result;
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) TreeMap(java.util.TreeMap)
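
The deprecated helper builds its map with an explicit loop; the same transformation can also be written with streams. A sketch under the assumption of the same Admin handle (the wrapper class and method name are made up), keeping the sorted TreeMap behaviour by passing a map factory to Collectors.toMap:

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class PeerConfigMapSketch {
    // Stream equivalent of listPeerConfigs() above.
    static Map<String, ReplicationPeerConfig> peerConfigs(Admin admin) throws IOException {
        return admin.listReplicationPeers().stream()
            .collect(Collectors.toMap(
                ReplicationPeerDescription::getPeerId,
                ReplicationPeerDescription::getPeerConfig,
                (a, b) -> a,        // merge function; peer ids are unique anyway
                TreeMap::new));     // keep the sorted-map behaviour of the original
    }
}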

Example 15 with ReplicationPeerDescription

Use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

In class ReplicationManager, method listReplicationPeers.

public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws ReplicationException {
    List<ReplicationPeerDescription> peers = new ArrayList<>();
    List<String> peerIds = replicationPeers.getAllPeerIds();
    for (String peerId : peerIds) {
        if (pattern == null || pattern.matcher(peerId).matches()) {
            peers.add(new ReplicationPeerDescription(peerId, replicationPeers.getStatusOfPeerFromBackingStore(peerId), replicationPeers.getReplicationPeerConfig(peerId)));
        }
    }
    return peers;
}
Also used : ArrayList(java.util.ArrayList) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription)
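
A short usage sketch of the Pattern filter, matching peer ids the way listReplicationPeers(Pattern) does; the peer ids and the regex here are made up:

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class PeerPatternSketch {
    public static void main(String[] args) {
        List<String> peerIds = Arrays.asList("peer1", "peer2", "backup_dc");
        Pattern pattern = Pattern.compile("peer\\d+");
        // a null pattern means "match every peer", mirroring the method above
        List<String> matched = peerIds.stream()
            .filter(id -> pattern == null || pattern.matcher(id).matches())
            .collect(Collectors.toList());
        System.out.println(matched); // [peer1, peer2]
    }
}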

Aggregations

ReplicationPeerDescription (org.apache.hadoop.hbase.replication.ReplicationPeerDescription): 23 usages
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 13 usages
IOException (java.io.IOException): 4 usages
ArrayList (java.util.ArrayList): 4 usages
List (java.util.List): 4 usages
Map (java.util.Map): 4 usages
Set (java.util.Set): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
Collections (java.util.Collections): 3 usages
HashMap (java.util.HashMap): 3 usages
Collectors (java.util.stream.Collectors): 3 usages
Path (org.apache.hadoop.fs.Path): 3 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 3 usages
ServerName (org.apache.hadoop.hbase.ServerName): 3 usages
SyncReplicationState (org.apache.hadoop.hbase.replication.SyncReplicationState): 3 usages
ZKUtil (org.apache.hadoop.hbase.zookeeper.ZKUtil): 3 usages
ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher): 3 usages
InterfaceAudience (org.apache.yetus.audience.InterfaceAudience): 3 usages
Logger (org.slf4j.Logger): 3 usages
LoggerFactory (org.slf4j.LoggerFactory): 3 usages