use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class ReplicationAdmin method listPeerConfigs.
/**
* @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicationPeers()} instead
*/
@Deprecated
public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
  List<ReplicationPeerDescription> peers = this.admin.listReplicationPeers();
  Map<String, ReplicationPeerConfig> result = new TreeMap<>();
  for (ReplicationPeerDescription peer : peers) {
    result.put(peer.getPeerId(), peer.getPeerConfig());
  }
  return result;
}
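Since listPeerConfigs() is deprecated, new callers should go through the Admin API the javadoc points to. A minimal sketch of the replacement, assuming an already-open Connection named connection (an assumption, not part of the snippet above):

// Sketch: read peer ids and configs through the non-deprecated Admin API.
// "connection" is an assumed org.apache.hadoop.hbase.client.Connection.
try (Admin admin = connection.getAdmin()) {
  for (ReplicationPeerDescription peer : admin.listReplicationPeers()) {
    ReplicationPeerConfig config = peer.getPeerConfig();
    System.out.println(peer.getPeerId() + " -> " + config.getClusterKey());
  }
}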
use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class ReplicationManager method listReplicationPeers.
public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
    throws ReplicationException {
  List<ReplicationPeerDescription> peers = new ArrayList<>();
  List<String> peerIds = replicationPeers.getAllPeerIds();
  for (String peerId : peerIds) {
    // A null pattern matches every peer id (the redundant null re-check is dropped).
    if (pattern == null || pattern.matcher(peerId).matches()) {
      peers.add(new ReplicationPeerDescription(peerId,
        replicationPeers.getStatusOfPeerFromBackingStore(peerId),
        replicationPeers.getReplicationPeerConfig(peerId)));
    }
  }
  return peers;
}
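A hedged usage sketch for the method above; manager stands in for a ReplicationManager instance obtained elsewhere (hypothetical name):

// List only the peers whose id starts with "dc"; passing null matches every peer.
Pattern pattern = Pattern.compile("dc.*");
List<ReplicationPeerDescription> matched = manager.listReplicationPeers(pattern);
List<ReplicationPeerDescription> all = manager.listReplicationPeers(null);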
use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class ReplicationMetaCleaner method chore.
@Override
protected void chore() {
  try {
    Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
    Map<String, Set<String>> serialTables = new HashMap<>();
    // Collect every table that has at least one column family with serial replication scope.
    for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) {
      boolean hasSerialScope = false;
      for (HColumnDescriptor column : entry.getValue().getFamilies()) {
        if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
          hasSerialScope = true;
          break;
        }
      }
      if (hasSerialScope) {
        serialTables.put(entry.getValue().getTableName().getNameAsString(), new HashSet<>());
      }
    }
    if (serialTables.isEmpty()) {
      return;
    }
    // Record which peers replicate each serial-scoped table.
    List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
    for (ReplicationPeerDescription peerDesc : peers) {
      for (Map.Entry<TableName, List<String>> map : peerDesc.getPeerConfig().getTableCFsMap()
          .entrySet()) {
        if (serialTables.containsKey(map.getKey().getNameAsString())) {
          serialTables.get(map.getKey().getNameAsString()).add(peerDesc.getPeerId());
          break;
        }
      }
    }
    Map<String, List<Long>> barrierMap = MetaTableAccessor.getAllBarriers(master.getConnection());
    for (Map.Entry<String, List<Long>> entry : barrierMap.entrySet()) {
      String encodedName = entry.getKey();
      byte[] encodedBytes = Bytes.toBytes(encodedName);
      boolean canClearRegion = false;
      Map<String, Long> posMap =
          MetaTableAccessor.getReplicationPositionForAllPeer(master.getConnection(), encodedBytes);
      if (posMap.isEmpty()) {
        continue;
      }
      String tableName =
          MetaTableAccessor.getSerialReplicationTableName(master.getConnection(), encodedBytes);
      Set<String> confPeers = serialTables.get(tableName);
      if (confPeers == null) {
        // The table no longer exists, or none of its column families is serial-scoped
        // any more, so its meta entries can be cleared.
        canClearRegion = true;
      } else {
        if (!allPeersHavePosition(confPeers, posMap)) {
          continue;
        }
        String daughterValue = MetaTableAccessor
            .getSerialReplicationDaughterRegion(master.getConnection(), encodedBytes);
        if (daughterValue != null) {
          // This region has been split or merged.
          boolean allDaughterStart = true;
          String[] daughterRegions = daughterValue.split(",");
          for (String daughter : daughterRegions) {
            byte[] region = Bytes.toBytes(daughter);
            if (!MetaTableAccessor.getReplicationBarriers(master.getConnection(), region).isEmpty()
                && !allPeersHavePosition(confPeers,
                  MetaTableAccessor.getReplicationPositionForAllPeer(master.getConnection(), region))) {
              allDaughterStart = false;
              break;
            }
          }
          if (allDaughterStart) {
            canClearRegion = true;
          }
        }
      }
      if (canClearRegion) {
        Delete delete = new Delete(encodedBytes);
        delete.addFamily(HConstants.REPLICATION_POSITION_FAMILY);
        delete.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
        delete.addFamily(HConstants.REPLICATION_META_FAMILY);
        try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
          metaTable.delete(delete);
        }
      } else {
        // Barriers whose sequence id is larger than the minimum position across all peers,
        // and the last barrier whose sequence id is smaller than that minimum, should be
        // kept. All other barriers can be deleted.
        long minPos = Long.MAX_VALUE;
        for (Map.Entry<String, Long> pos : posMap.entrySet()) {
          minPos = Math.min(minPos, pos.getValue());
        }
        List<Long> barriers = entry.getValue();
        int index = Collections.binarySearch(barriers, minPos);
        if (index < 0) {
          index = -index - 1;
        }
        Delete delete = new Delete(encodedBytes);
        for (int i = 0; i < index - 1; i++) {
          delete.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, Bytes.toBytes(barriers.get(i)));
        }
        try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
          metaTable.delete(delete);
        }
      }
    }
  } catch (IOException e) {
    LOG.error("Exception during cleaning up.", e);
  }
}
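The chore calls an allPeersHavePosition helper that this excerpt does not show. A plausible reconstruction, assuming it only checks that every configured peer has recorded a position for the region:

// Hypothetical sketch of the helper referenced above, not the verbatim HBase source.
private boolean allPeersHavePosition(Set<String> peers, Map<String, Long> posMap) {
  for (String peer : peers) {
    if (!posMap.containsKey(peer)) {
      return false;
    }
  }
  return true;
}

A worked example of the barrier trimming in the else branch: with barriers [10, 20, 30, 40] and minPos = 25, Collections.binarySearch returns -3, so index becomes 2 and the loop deletes only barrier 10. Barrier 20 survives as the last barrier below the minimum position, exactly the rule the comment states.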
use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class DumpReplicationQueues method dumpPeersState.
public String dumpPeersState(List<ReplicationPeerDescription> peers) throws Exception {
  Map<String, String> currentConf;
  StringBuilder sb = new StringBuilder();
  for (ReplicationPeerDescription peer : peers) {
    ReplicationPeerConfig peerConfig = peer.getPeerConfig();
    sb.append("Peer: " + peer.getPeerId() + "\n");
    sb.append(" State: " + (peer.isEnabled() ? "ENABLED" : "DISABLED") + "\n");
    sb.append(" Cluster Name: " + peerConfig.getClusterKey() + "\n");
    sb.append(" Replication Endpoint: " + peerConfig.getReplicationEndpointImpl() + "\n");
    currentConf = peerConfig.getConfiguration();
    // Only show when we have a custom configuration for the peer
    if (currentConf.size() > 1) {
      sb.append(" Peer Configuration: " + currentConf + "\n");
    }
    sb.append(" Peer Table CFs: " + peerConfig.getTableCFsMap() + "\n");
    sb.append(" Peer Namespaces: " + peerConfig.getNamespaces() + "\n");
  }
  return sb.toString();
}
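A usage sketch tying the pieces together, assuming admin is an org.apache.hadoop.hbase.client.Admin and that DumpReplicationQueues can be instantiated directly (it is normally driven as a command-line tool, so this wiring is an assumption):

// Fetch the live peer list and print the formatted state dump.
List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
DumpReplicationQueues dumper = new DumpReplicationQueues();
System.out.println(dumper.dumpPeersState(peers));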