use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class DumpReplicationQueues method dumpReplicationQueues.
private int dumpReplicationQueues(DumpOptions opts) throws Exception {
  Configuration conf = getConf();
  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();
  // Our zk watcher
  ZKWatcher zkw = new ZKWatcher(conf,
    "DumpReplicationQueues" + EnvironmentEdgeManager.currentTime(),
    new WarnOnlyAbortable(), true);
  try {
    LOG.info("Our Quorum: " + zkw.getQuorum());
    List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
    if (replicatedTableCFs.isEmpty()) {
      LOG.info("No tables with a configured replication peer were found.");
      return 0;
    } else {
      LOG.info("Replicated Tables: " + replicatedTableCFs);
    }
    List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
    if (peers.isEmpty()) {
      LOG.info("Replication is enabled but no peer configuration was found.");
    }
    System.out.println("Dumping replication peers and configurations:");
    System.out.println(dumpPeersState(peers));
    if (opts.isDistributed()) {
      LOG.info("Found [--distributed], will poll each RegionServer.");
      Set<String> peerIds =
        peers.stream().map(peer -> peer.getPeerId()).collect(Collectors.toSet());
      System.out.println(dumpQueues(zkw, peerIds, opts.isHdfs()));
      System.out.println(dumpReplicationSummary());
    } else {
      // use ZK instead
      System.out.print("Dumping replication znodes via ZooKeeper:");
      System.out.println(ZKDump.getReplicationZnodesDump(zkw));
    }
    return 0;
  } catch (IOException e) {
    // Surface the failure instead of silently returning a non-zero exit code.
    LOG.error("Failed to dump replication queues.", e);
    return -1;
  } finally {
    zkw.close();
  }
}
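The formatting helper dumpPeersState is not shown in this snippet. A rough sketch of what such a helper could look like follows; describePeers is a hypothetical name, and only the public ReplicationPeerDescription accessors getPeerId(), isEnabled() and getPeerConfig() are assumed:
// Hypothetical sketch of a peer-formatting helper in the spirit of dumpPeersState
// (the real method's body is not shown above).
private static String describePeers(List<ReplicationPeerDescription> peers) {
  StringBuilder sb = new StringBuilder();
  for (ReplicationPeerDescription peer : peers) {
    sb.append("PeerID: ").append(peer.getPeerId()).append('\n');
    sb.append("  State: ").append(peer.isEnabled() ? "ENABLED" : "DISABLED").append('\n');
    sb.append("  Config: ").append(peer.getPeerConfig()).append('\n');
  }
  return sb.toString();
}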
use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class HBaseFsck method cleanReplicationBarrier.
public void cleanReplicationBarrier() throws IOException {
  if (!cleanReplicationBarrier || cleanReplicationBarrierTable == null) {
    return;
  }
  if (cleanReplicationBarrierTable.isSystemTable()) {
    errors.reportError(ERROR_CODE.INVALID_TABLE,
      "invalid table: " + cleanReplicationBarrierTable);
    return;
  }
  boolean isGlobalScope = false;
  try {
    isGlobalScope = admin.getDescriptor(cleanReplicationBarrierTable).hasGlobalReplicationScope();
  } catch (TableNotFoundException e) {
    LOG.info("we may need to clean some erroneous data due to bugs");
  }
  if (isGlobalScope) {
    errors.reportError(ERROR_CODE.INVALID_TABLE,
      "table's replication scope is global: " + cleanReplicationBarrierTable);
    return;
  }
  // Scan the replication barrier family for all regions of the table in hbase:meta.
  List<byte[]> regionNames = new ArrayList<>();
  Scan barrierScan = new Scan();
  barrierScan.setCaching(100);
  barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
  barrierScan
    .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable,
      ClientMetaTableAccessor.QueryType.REGION))
    .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable,
      ClientMetaTableAccessor.QueryType.REGION));
  Result result;
  try (ResultScanner scanner = meta.getScanner(barrierScan)) {
    while ((result = scanner.next()) != null) {
      regionNames.add(result.getRow());
    }
  }
  if (regionNames.isEmpty()) {
    errors.reportError(ERROR_CODE.INVALID_TABLE,
      "there is no barriers of this table: " + cleanReplicationBarrierTable);
    return;
  }
  ReplicationQueueStorage queueStorage =
    ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf());
  List<ReplicationPeerDescription> peerDescriptions = admin.listReplicationPeers();
  if (peerDescriptions != null && !peerDescriptions.isEmpty()) {
    // Only peers whose config actually covers this table need their
    // last-pushed sequence ids cleared.
    List<String> peers = peerDescriptions.stream()
      .filter(peerConfig -> peerConfig.getPeerConfig()
        .needToReplicate(cleanReplicationBarrierTable))
      .map(peerConfig -> peerConfig.getPeerId())
      .collect(Collectors.toList());
    try {
      // Remove the last pushed sequence ids in batches of 100 region names per peer.
      List<String> batch = new ArrayList<>();
      for (String peer : peers) {
        for (byte[] regionName : regionNames) {
          batch.add(RegionInfo.encodeRegionName(regionName));
          if (batch.size() % 100 == 0) {
            queueStorage.removeLastSequenceIds(peer, batch);
            batch.clear();
          }
        }
        if (!batch.isEmpty()) {
          queueStorage.removeLastSequenceIds(peer, batch);
          batch.clear();
        }
      }
    } catch (ReplicationException re) {
      throw new IOException(re);
    }
  }
  // Finally drop the barrier cells themselves from hbase:meta.
  for (byte[] regionName : regionNames) {
    meta.delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY));
  }
  setShouldRerun();
}
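The filtering step above hinges on ReplicationPeerConfig.needToReplicate(TableName). Pulled out on its own, the same pattern could be wrapped like this (peerIdsFor is a hypothetical helper; Admin.listReplicationPeers() and needToReplicate() are the same calls used above):
// Sketch: select the ids of the peers whose config covers a given table.
List<String> peerIdsFor(Admin admin, TableName table) throws IOException {
  return admin.listReplicationPeers().stream()
    .filter(desc -> desc.getPeerConfig().needToReplicate(table))
    .map(ReplicationPeerDescription::getPeerId)
    .collect(Collectors.toList());
}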
use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class ReplicationMetaCleaner method chore.
@Override
protected void chore() {
  try {
    // Collect all tables that have at least one column family with serial
    // replication scope.
    Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
    Map<String, Set<String>> serialTables = new HashMap<>();
    for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) {
      boolean hasSerialScope = false;
      for (HColumnDescriptor column : entry.getValue().getFamilies()) {
        if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
          hasSerialScope = true;
          break;
        }
      }
      if (hasSerialScope) {
        serialTables.put(entry.getValue().getTableName().getNameAsString(), new HashSet<>());
      }
    }
    if (serialTables.isEmpty()) {
      return;
    }
    // Map each serial table to the set of peer ids that replicate it.
    List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
    for (ReplicationPeerDescription peerDesc : peers) {
      for (Map.Entry<TableName, List<String>> map : peerDesc.getPeerConfig().getTableCFsMap()
          .entrySet()) {
        if (serialTables.containsKey(map.getKey().getNameAsString())) {
          serialTables.get(map.getKey().getNameAsString()).add(peerDesc.getPeerId());
          break;
        }
      }
    }
    Map<String, List<Long>> barrierMap = MetaTableAccessor.getAllBarriers(master.getConnection());
    for (Map.Entry<String, List<Long>> entry : barrierMap.entrySet()) {
      String encodedName = entry.getKey();
      byte[] encodedBytes = Bytes.toBytes(encodedName);
      boolean canClearRegion = false;
      Map<String, Long> posMap =
        MetaTableAccessor.getReplicationPositionForAllPeer(master.getConnection(), encodedBytes);
      if (posMap.isEmpty()) {
        continue;
      }
      String tableName =
        MetaTableAccessor.getSerialReplicationTableName(master.getConnection(), encodedBytes);
      Set<String> confPeers = serialTables.get(tableName);
      if (confPeers == null) {
        // This table doesn't exist or all cf's scope is not serial any more, we can clear meta.
        canClearRegion = true;
      } else {
        if (!allPeersHavePosition(confPeers, posMap)) {
          continue;
        }
        String daughterValue = MetaTableAccessor
          .getSerialReplicationDaughterRegion(master.getConnection(), encodedBytes);
        if (daughterValue != null) {
          // This region has been merged or split.
          boolean allDaughterStart = true;
          String[] daughterRegions = daughterValue.split(",");
          for (String daughter : daughterRegions) {
            byte[] region = Bytes.toBytes(daughter);
            if (!MetaTableAccessor.getReplicationBarriers(master.getConnection(), region).isEmpty()
                && !allPeersHavePosition(confPeers, MetaTableAccessor
                  .getReplicationPositionForAllPeer(master.getConnection(), region))) {
              allDaughterStart = false;
              break;
            }
          }
          if (allDaughterStart) {
            canClearRegion = true;
          }
        }
      }
      if (canClearRegion) {
        Delete delete = new Delete(encodedBytes);
        delete.addFamily(HConstants.REPLICATION_POSITION_FAMILY);
        delete.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
        delete.addFamily(HConstants.REPLICATION_META_FAMILY);
        try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
          metaTable.delete(delete);
        }
      } else {
        // Barriers whose seq is larger than min pos of all peers, and the last barrier whose seq
        // is smaller than min pos should be kept. All other barriers can be deleted.
        long minPos = Long.MAX_VALUE;
        for (Map.Entry<String, Long> pos : posMap.entrySet()) {
          minPos = Math.min(minPos, pos.getValue());
        }
        List<Long> barriers = entry.getValue();
        int index = Collections.binarySearch(barriers, minPos);
        if (index < 0) {
          index = -index - 1;
        }
        Delete delete = new Delete(encodedBytes);
        for (int i = 0; i < index - 1; i++) {
          delete.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, Bytes.toBytes(barriers.get(i)));
        }
        try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
          metaTable.delete(delete);
        }
      }
    }
  } catch (IOException e) {
    LOG.error("Exception during cleaning up.", e);
  }
}
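The chore resolves peers through ReplicationPeerConfig.getTableCFsMap(). A standalone sketch of inspecting that map follows; note that getTableCFsMap() may return null for peers that replicate all user tables, which the loop above does not guard against:
// Sketch: log which tables (and column families) each peer replicates,
// using the same getTableCFsMap() call the chore relies on.
for (ReplicationPeerDescription peerDesc : admin.listReplicationPeers()) {
  Map<TableName, List<String>> tableCFs = peerDesc.getPeerConfig().getTableCFsMap();
  if (tableCFs == null) {
    continue; // no explicit table-CFs mapping configured for this peer
  }
  for (Map.Entry<TableName, List<String>> e : tableCFs.entrySet()) {
    LOG.info("peer " + peerDesc.getPeerId() + " replicates " + e.getKey()
      + (e.getValue() == null ? " (all families)" : " families " + e.getValue()));
  }
}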
use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class ReplicationAdmin method listPeerConfigs.
/**
* @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicationPeers()} instead
*/
@Deprecated
public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
  List<ReplicationPeerDescription> peers = this.admin.listReplicationPeers();
  Map<String, ReplicationPeerConfig> result = new TreeMap<>();
  for (ReplicationPeerDescription peer : peers) {
    result.put(peer.getPeerId(), peer.getPeerConfig());
  }
  return result;
}
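For callers migrating off the deprecated method, the same map can be built directly against Admin.listReplicationPeers(), as the deprecation note suggests. This stream version is a sketch, not part of ReplicationAdmin; the TreeMap supplier keeps peer ids sorted like the original:
Map<String, ReplicationPeerConfig> configs = admin.listReplicationPeers().stream()
  .collect(Collectors.toMap(ReplicationPeerDescription::getPeerId,
    ReplicationPeerDescription::getPeerConfig, (a, b) -> a, TreeMap::new));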
use of org.apache.hadoop.hbase.replication.ReplicationPeerDescription in project hbase by apache.
the class ReplicationManager method listReplicationPeers.
public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
    throws ReplicationException {
  List<ReplicationPeerDescription> peers = new ArrayList<>();
  List<String> peerIds = replicationPeers.getAllPeerIds();
  for (String peerId : peerIds) {
    // A null pattern matches every peer id.
    if (pattern == null || pattern.matcher(peerId).matches()) {
      peers.add(new ReplicationPeerDescription(peerId,
        replicationPeers.getStatusOfPeerFromBackingStore(peerId),
        replicationPeers.getReplicationPeerConfig(peerId)));
    }
  }
  return peers;
}
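A possible call site, assuming a ReplicationManager instance named replicationManager; passing null lists every peer, since the method treats a null pattern as match-all:
// Sketch: list only the peers whose id starts with "cluster_", then all peers.
List<ReplicationPeerDescription> some =
  replicationManager.listReplicationPeers(Pattern.compile("cluster_.*"));
List<ReplicationPeerDescription> all = replicationManager.listReplicationPeers(null);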