Search in sources :

Example 6 with ReplicationPeerDescription

use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

The method listPeerConfigs from the class ReplicationAdmin.

/**
 * Returns the configuration of every replication peer, keyed by peer id.
 *
 * @return a sorted map from peer id to its {@link ReplicationPeerConfig}
 * @throws IOException if the peer listing cannot be retrieved
 * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicationPeers()} instead
 */
@Deprecated
public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
    Map<String, ReplicationPeerConfig> peerConfigs = new TreeMap<>();
    for (ReplicationPeerDescription description : this.admin.listReplicationPeers()) {
        peerConfigs.put(description.getPeerId(), description.getPeerConfig());
    }
    return peerConfigs;
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) TreeMap(java.util.TreeMap)

Example 7 with ReplicationPeerDescription

use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

The method listReplicationPeers from the class ReplicationManager.

/**
 * Lists the replication peers known to the backing store, optionally filtered by a
 * pattern on the peer id.
 *
 * @param pattern peer-id filter; {@code null} means return all peers
 * @return descriptions (id, enabled state, config) of the matching peers
 * @throws ReplicationException if peer metadata cannot be read from the backing store
 */
public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws ReplicationException {
    List<ReplicationPeerDescription> peers = new ArrayList<>();
    List<String> peerIds = replicationPeers.getAllPeerIds();
    for (String peerId : peerIds) {
        // A null pattern matches everything; the short-circuiting || already guarantees
        // pattern is non-null on the right-hand side, so no second null check is needed.
        if (pattern == null || pattern.matcher(peerId).matches()) {
            peers.add(new ReplicationPeerDescription(peerId,
                replicationPeers.getStatusOfPeerFromBackingStore(peerId),
                replicationPeers.getReplicationPeerConfig(peerId)));
        }
    }
    return peers;
}
Also used : ArrayList(java.util.ArrayList) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription)

Example 8 with ReplicationPeerDescription

use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

The method chore from the class ReplicationMetaCleaner.

@Override
protected void chore() {
    try {
        // Step 1: find every table that has at least one column family with serial
        // replication scope; map table name -> set of peer ids (filled in step 2).
        Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
        Map<String, Set<String>> serialTables = new HashMap<>();
        for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) {
            boolean hasSerialScope = false;
            for (HColumnDescriptor column : entry.getValue().getFamilies()) {
                if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
                    hasSerialScope = true;
                    break;
                }
            }
            if (hasSerialScope) {
                serialTables.put(entry.getValue().getTableName().getNameAsString(), new HashSet<>());
            }
        }
        // No serially-replicated tables means there is no serial-replication meta to clean.
        if (serialTables.isEmpty()) {
            return;
        }
        // Step 2: record, for each serial table, which peers replicate it.
        List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
        for (ReplicationPeerDescription peerDesc : peers) {
            // NOTE(review): getTableCFsMap() may be null for a peer that replicates all
            // tables (no explicit table-CF map) — confirm upstream; a null here would NPE.
            for (Map.Entry<TableName, List<String>> map : peerDesc.getPeerConfig().getTableCFsMap().entrySet()) {
                if (serialTables.containsKey(map.getKey().getNameAsString())) {
                    serialTables.get(map.getKey().getNameAsString()).add(peerDesc.getPeerId());
                    // NOTE(review): this break stops after the first serial table matched for
                    // this peer, so a peer replicating several serial tables is recorded
                    // against only one of them — verify this is intended.
                    break;
                }
            }
        }
        // Step 3: walk every region that has replication barriers recorded in meta and
        // decide, per region, whether its serial-replication bookkeeping can be removed.
        Map<String, List<Long>> barrierMap = MetaTableAccessor.getAllBarriers(master.getConnection());
        for (Map.Entry<String, List<Long>> entry : barrierMap.entrySet()) {
            String encodedName = entry.getKey();
            byte[] encodedBytes = Bytes.toBytes(encodedName);
            boolean canClearRegion = false;
            // Per-peer replication positions for this region; nothing to judge without them.
            Map<String, Long> posMap = MetaTableAccessor.getReplicationPositionForAllPeer(master.getConnection(), encodedBytes);
            if (posMap.isEmpty()) {
                continue;
            }
            String tableName = MetaTableAccessor.getSerialReplicationTableName(master.getConnection(), encodedBytes);
            Set<String> confPeers = serialTables.get(tableName);
            if (confPeers == null) {
                // This table doesn't exist or all cf's scope is not serial any more, we can clear meta.
                canClearRegion = true;
            } else {
                // Only proceed once every configured peer has reported a position here.
                if (!allPeersHavePosition(confPeers, posMap)) {
                    continue;
                }
                String daughterValue = MetaTableAccessor.getSerialReplicationDaughterRegion(master.getConnection(), encodedBytes);
                if (daughterValue != null) {
                    //this region is merged or split
                    boolean allDaughterStart = true;
                    String[] daughterRegions = daughterValue.split(",");
                    for (String daughter : daughterRegions) {
                        byte[] region = Bytes.toBytes(daughter);
                        // A daughter blocks cleanup if it has barriers but its peers have
                        // not all reported positions yet (replication not started there).
                        if (!MetaTableAccessor.getReplicationBarriers(master.getConnection(), region).isEmpty() && !allPeersHavePosition(confPeers, MetaTableAccessor.getReplicationPositionForAllPeer(master.getConnection(), region))) {
                            allDaughterStart = false;
                            break;
                        }
                    }
                    if (allDaughterStart) {
                        canClearRegion = true;
                    }
                }
            }
            if (canClearRegion) {
                // Safe to wipe all serial-replication families for this region row in meta.
                Delete delete = new Delete(encodedBytes);
                delete.addFamily(HConstants.REPLICATION_POSITION_FAMILY);
                delete.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
                delete.addFamily(HConstants.REPLICATION_META_FAMILY);
                try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
                    metaTable.delete(delete);
                }
            } else {
                // Barriers whose seq is larger than min pos of all peers, and the last barrier whose seq
                // is smaller than min pos should be kept. All other barriers can be deleted.
                long minPos = Long.MAX_VALUE;
                for (Map.Entry<String, Long> pos : posMap.entrySet()) {
                    minPos = Math.min(minPos, pos.getValue());
                }
                List<Long> barriers = entry.getValue();
                // binarySearch returns (-(insertion point) - 1) on a miss; normalize to the
                // insertion point so `index` is the first barrier >= minPos either way.
                int index = Collections.binarySearch(barriers, minPos);
                if (index < 0) {
                    index = -index - 1;
                }
                Delete delete = new Delete(encodedBytes);
                // Delete barriers strictly before index-1, keeping the last one below minPos.
                for (int i = 0; i < index - 1; i++) {
                    delete.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, Bytes.toBytes(barriers.get(i)));
                }
                try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
                    metaTable.delete(delete);
                }
            }
        }
    } catch (IOException e) {
        LOG.error("Exception during cleaning up.", e);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) List(java.util.List) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HashMap(java.util.HashMap) Map(java.util.Map)

Example 9 with ReplicationPeerDescription

use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.

The method dumpPeersState from the class DumpReplicationQueues.

/**
 * Renders a human-readable, multi-line summary of the given replication peers:
 * id, enabled state, cluster key, endpoint, custom configuration, table CFs and
 * namespaces.
 *
 * @param peers the peer descriptions to dump
 * @return the formatted summary, one indented section per peer
 * @throws Exception declared for interface compatibility; not thrown by this body
 */
public String dumpPeersState(List<ReplicationPeerDescription> peers) throws Exception {
    StringBuilder sb = new StringBuilder();
    for (ReplicationPeerDescription peer : peers) {
        ReplicationPeerConfig peerConfig = peer.getPeerConfig();
        // Chained appends avoid building throwaway concatenated Strings on every line.
        sb.append("Peer: ").append(peer.getPeerId()).append("\n");
        sb.append("    State: ").append(peer.isEnabled() ? "ENABLED" : "DISABLED").append("\n");
        sb.append("    Cluster Name: ").append(peerConfig.getClusterKey()).append("\n");
        sb.append("    Replication Endpoint: ").append(peerConfig.getReplicationEndpointImpl()).append("\n");
        Map<String, String> currentConf = peerConfig.getConfiguration();
        // Only show when we have a custom configuration for the peer
        // (size > 1 presumably because one entry is always present by default — verify).
        if (currentConf.size() > 1) {
            sb.append("    Peer Configuration: ").append(currentConf).append("\n");
        }
        sb.append("    Peer Table CFs: ").append(peerConfig.getTableCFsMap()).append("\n");
        sb.append("    Peer Namespaces: ").append(peerConfig.getNamespaces()).append("\n");
    }
    return sb.toString();
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription)

Aggregations

ReplicationPeerDescription (org.apache.hadoop.hbase.replication.ReplicationPeerDescription)9 ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig)5 IOException (java.io.IOException)2 ArrayList (java.util.ArrayList)2 List (java.util.List)2 Map (java.util.Map)2 Set (java.util.Set)2 Configuration (org.apache.hadoop.conf.Configuration)2 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)2 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)2 AtomicLongMap (com.google.common.util.concurrent.AtomicLongMap)1 FileNotFoundException (java.io.FileNotFoundException)1 Arrays (java.util.Arrays)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 LinkedList (java.util.LinkedList)1 Queue (java.util.Queue)1 TreeMap (java.util.TreeMap)1 Pattern (java.util.regex.Pattern)1 Collectors (java.util.stream.Collectors)1