use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
the class ReplicationBarrierCleaner method chore.
@Override
// Synchronized so only one running instance at a time.
public synchronized void chore() {
  long totalRows = 0;
  long cleanedRows = 0;
  long deletedRows = 0;
  long deletedBarriers = 0;
  long deletedLastPushedSeqIds = 0;
  TableName tableName = null;
  List<String> peerIds = null;
  try (Table metaTable = conn.getTable(TableName.META_TABLE_NAME);
    ResultScanner scanner = metaTable.getScanner(
      new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions())) {
    for (;;) {
      Result result = scanner.next();
      if (result == null) {
        break;
      }
      totalRows++;
      long[] barriers = ReplicationBarrierFamilyFormat.getReplicationBarriers(result);
      if (barriers.length == 0) {
        continue;
      }
      byte[] regionName = result.getRow();
      TableName tn = RegionInfo.getTable(regionName);
      if (!tn.equals(tableName)) {
        tableName = tn;
        peerIds = peerManager.getSerialPeerIdsBelongsTo(tableName);
      }
      if (peerIds.isEmpty()) {
        // check if the region has already been removed, i.e., no catalog family
        if (metaTable.exists(new Get(regionName).addFamily(HConstants.CATALOG_FAMILY))) {
          // exists, then only keep the newest barrier
          Cell cell = result.getColumnLatestCell(HConstants.REPLICATION_BARRIER_FAMILY,
            HConstants.SEQNUM_QUALIFIER);
          metaTable.delete(new Delete(regionName)
            .addFamily(HConstants.REPLICATION_BARRIER_FAMILY, cell.getTimestamp() - 1));
          deletedBarriers += barriers.length - 1;
        } else {
          // not exists, delete all the barriers
          metaTable
            .delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY));
          deletedBarriers += barriers.length;
        }
        cleanedRows++;
        continue;
      }
      String encodedRegionName = RegionInfo.encodeRegionName(regionName);
      long pushedSeqId = Long.MAX_VALUE;
      for (String peerId : peerIds) {
        pushedSeqId = Math.min(pushedSeqId,
          peerManager.getQueueStorage().getLastSequenceId(encodedRegionName, peerId));
      }
      int index = Arrays.binarySearch(barriers, pushedSeqId);
      if (index == -1) {
        // the pushedSeqId is smaller than the first barrier, so there is nothing to clean up
        // for it.
        continue;
      }
      if (index < 0) {
        index = -index - 1;
      } else {
        index++;
      }
      // All the entries before the last barrier have been pushed: we are in the last
      // closed range and the pushedSeqId is the last barrier minus 1.
      if (index == barriers.length - 1 && pushedSeqId == barriers[barriers.length - 1] - 1) {
        // check if the region has already been removed, i.e., no catalog family
        if (!metaTable.exists(new Get(regionName).addFamily(HConstants.CATALOG_FAMILY))) {
          ReplicationQueueStorage queueStorage = peerManager.getQueueStorage();
          for (String peerId : peerIds) {
            queueStorage.removeLastSequenceIds(peerId, Arrays.asList(encodedRegionName));
            deletedLastPushedSeqIds++;
          }
          metaTable
            .delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY));
          deletedRows++;
          deletedBarriers += barriers.length;
          continue;
        }
      }
      // the barriers before the one at index - 1 (exclusive) are no longer needed; see
      // SerialReplicationChecker for more details.
      if (index - 1 > 0) {
        List<Cell> cells = result.getColumnCells(HConstants.REPLICATION_BARRIER_FAMILY,
          HConstants.SEQNUM_QUALIFIER);
        // All barriers before this cell (exclusive) can be removed
        Cell cell = cells.get(cells.size() - index);
        metaTable.delete(new Delete(regionName)
          .addFamily(HConstants.REPLICATION_BARRIER_FAMILY, cell.getTimestamp() - 1));
        cleanedRows++;
        deletedBarriers += index - 1;
      }
    }
  } catch (ReplicationException | IOException e) {
    LOG.warn("Failed to clean up replication barrier", e);
  }
  if (totalRows > 0) {
    LOG.info("TotalRows={}, cleanedRows={}, deletedRows={}, deletedBarriers={}, "
      + "deletedLastPushedSeqIds={}", totalRows, cleanedRows, deletedRows, deletedBarriers,
      deletedLastPushedSeqIds);
  }
}
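The cleanup above hinges on how the Arrays.binarySearch result is normalized into the index of the barrier range that still has unpushed entries. Below is a minimal, self-contained sketch of just that index arithmetic; the class name and sample barrier values are hypothetical, used only for illustration.

import java.util.Arrays;

public class BarrierIndexDemo {
  // Mirrors the normalization in chore(): turn the raw binarySearch result
  // into the index of the barrier range containing the pushed sequence id.
  static int normalize(long[] barriers, long pushedSeqId) {
    int index = Arrays.binarySearch(barriers, pushedSeqId);
    if (index < 0) {
      // not found: binarySearch returns -(insertionPoint) - 1
      index = -index - 1;
    } else {
      // exact hit: the pushed id is the first id of the next range
      index++;
    }
    return index;
  }

  public static void main(String[] args) {
    long[] barriers = { 10, 20, 30 };
    // binarySearch returns -1 here: pushedSeqId is below the first barrier,
    // the case chore() skips with "continue" before normalizing.
    System.out.println(normalize(barriers, 5)); // 0
    // 25 falls in the range [20, 30): barriers before index 1 are obsolete.
    System.out.println(normalize(barriers, 25)); // 2
    // 29 == last barrier - 1: the last closed range is fully pushed.
    System.out.println(normalize(barriers, 29)); // 2
  }
}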
use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
the class UpdatePeerConfigProcedure method updateLastPushedSequenceIdForSerialPeer.
@Override
protected void updateLastPushedSequenceIdForSerialPeer(MasterProcedureEnv env)
  throws IOException, ReplicationException {
  if (!oldPeerConfig.isSerial()) {
    assert peerConfig.isSerial();
    // change to serial
    setLastPushedSequenceId(env, peerConfig);
    return;
  }
  if (!peerConfig.isSerial()) {
    // remove the serial flag
    env.getReplicationPeerManager().removeAllLastPushedSeqIds(peerId);
    return;
  }
  // enter here means peerConfig and oldPeerConfig are both serial, let's find out the diffs and
  // process them
  ReplicationQueueStorage queueStorage = env.getReplicationPeerManager().getQueueStorage();
  Connection conn = env.getMasterServices().getConnection();
  Map<String, Long> lastSeqIds = new HashMap<String, Long>();
  List<String> encodedRegionNames = new ArrayList<>();
  for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
    if (!td.hasGlobalReplicationScope()) {
      continue;
    }
    TableName tn = td.getTableName();
    if (oldPeerConfig.needToReplicate(tn)) {
      if (!peerConfig.needToReplicate(tn)) {
        // removed from peer config
        for (String encodedRegionName : ReplicationBarrierFamilyFormat
          .getTableEncodedRegionNamesForSerialReplication(conn, tn)) {
          addToList(encodedRegionNames, encodedRegionName, queueStorage);
        }
      }
    } else if (peerConfig.needToReplicate(tn)) {
      // newly added to peer config
      setLastPushedSequenceIdForTable(env, tn, lastSeqIds);
    }
  }
  if (!encodedRegionNames.isEmpty()) {
    queueStorage.removeLastSequenceIds(peerId, encodedRegionNames);
  }
  if (!lastSeqIds.isEmpty()) {
    queueStorage.setLastSequenceIds(peerId, lastSeqIds);
  }
}
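The serial-to-serial branch above reduces to a set difference over needToReplicate: tables leaving the peer config get their last pushed sequence ids removed, tables entering it get them initialized. A stripped-down sketch of that diff, with a Predicate standing in for the peer config (this shape is illustrative, not the HBase API):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

final class PeerConfigDiff {
  final List<String> removed = new ArrayList<>(); // last pushed seq ids to remove
  final List<String> added = new ArrayList<>();   // last pushed seq ids to initialize

  PeerConfigDiff(List<String> tables, Predicate<String> oldNeeds, Predicate<String> newNeeds) {
    for (String table : tables) {
      if (oldNeeds.test(table) && !newNeeds.test(table)) {
        removed.add(table); // dropped from the peer config
      } else if (!oldNeeds.test(table) && newNeeds.test(table)) {
        added.add(table); // newly added to the peer config
      }
      // tables in both configs (or in neither) need no bookkeeping change
    }
  }
}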
use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
the class AbstractPeerProcedure method setLastPushedSequenceIdForTable.
// Will put the encodedRegionName -> lastPushedSeqId pair into the map passed in. If the map is
// large enough, we will call queueStorage.setLastSequenceIds and clear the map, so the caller
// should not forget to check whether the map is empty at the end: if not, you should call
// queueStorage.setLastSequenceIds to write out the remaining entries in the map.
protected final void setLastPushedSequenceIdForTable(MasterProcedureEnv env, TableName tableName,
  Map<String, Long> lastSeqIds) throws IOException, ReplicationException {
  TableStateManager tsm = env.getMasterServices().getTableStateManager();
  ReplicationQueueStorage queueStorage = env.getReplicationPeerManager().getQueueStorage();
  Connection conn = env.getMasterServices().getConnection();
  if (!needSetLastPushedSequenceId(tsm, tableName)) {
    LOG.debug("Skip setting last pushed sequence id for {}", tableName);
    return;
  }
  for (Pair<String, Long> name2Barrier : ReplicationBarrierFamilyFormat
    .getTableEncodedRegionNameAndLastBarrier(conn, tableName)) {
    LOG.trace("Update last pushed sequence id for {}, {}", tableName, name2Barrier);
    addToMap(lastSeqIds, name2Barrier.getFirst(), name2Barrier.getSecond().longValue() - 1,
      queueStorage);
  }
}
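The contract in the comment above is easy to miss, so here is a hypothetical caller sketch; variable names follow the surrounding code, and tablesToInitialize is invented for illustration. This is exactly the pattern updateLastPushedSequenceIdForSerialPeer follows above.

Map<String, Long> lastSeqIds = new HashMap<>();
for (TableName tn : tablesToInitialize) { // hypothetical list of tables
  setLastPushedSequenceIdForTable(env, tn, lastSeqIds);
}
// addToMap may have flushed full batches already; write out what is left.
if (!lastSeqIds.isEmpty()) {
  queueStorage.setLastSequenceIds(peerId, lastSeqIds);
}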
use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
the class DumpReplicationQueues method dumpQueues.
public String dumpQueues(ZKWatcher zkw, Set<String> peerIds, boolean hdfs) throws Exception {
  ReplicationQueueStorage queueStorage;
  StringBuilder sb = new StringBuilder();
  queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf());
  Set<ServerName> liveRegionServers =
    ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode).stream()
      .map(ServerName::parseServerName).collect(Collectors.toSet());
  // Loops each peer on each RS and dumps the queues
  List<ServerName> regionservers = queueStorage.getListOfReplicators();
  if (regionservers == null || regionservers.isEmpty()) {
    return sb.toString();
  }
  for (ServerName regionserver : regionservers) {
    List<String> queueIds = queueStorage.getAllQueues(regionserver);
    if (!liveRegionServers.contains(regionserver)) {
      deadRegionServers.add(regionserver.getServerName());
    }
    for (String queueId : queueIds) {
      ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
      List<String> wals = queueStorage.getWALsInQueue(regionserver, queueId);
      Collections.sort(wals);
      if (!peerIds.contains(queueInfo.getPeerId())) {
        deletedQueues.add(regionserver + "/" + queueId);
        sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, true, hdfs));
      } else {
        sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, false, hdfs));
      }
    }
  }
  return sb.toString();
}
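For reference, the read-only traversal that dumpQueues performs reduces to three queue-storage calls, all shown in the snippet above. A minimal sketch, assuming zkw and conf are already initialized and with exception handling omitted:

ReplicationQueueStorage storage =
  ReplicationStorageFactory.getReplicationQueueStorage(zkw, conf);
for (ServerName rs : storage.getListOfReplicators()) {
  for (String queueId : storage.getAllQueues(rs)) {
    List<String> wals = storage.getWALsInQueue(rs, queueId);
    System.out.println(rs + "/" + queueId + " -> " + wals.size() + " WALs");
  }
}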
use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
the class HBaseFsck method cleanReplicationBarrier.
public void cleanReplicationBarrier() throws IOException {
  if (!cleanReplicationBarrier || cleanReplicationBarrierTable == null) {
    return;
  }
  if (cleanReplicationBarrierTable.isSystemTable()) {
    errors.reportError(ERROR_CODE.INVALID_TABLE,
      "invalid table: " + cleanReplicationBarrierTable);
    return;
  }
  boolean isGlobalScope = false;
  try {
    isGlobalScope = admin.getDescriptor(cleanReplicationBarrierTable).hasGlobalReplicationScope();
  } catch (TableNotFoundException e) {
    LOG.info("we may need to clean some erroneous data due to bugs");
  }
  if (isGlobalScope) {
    errors.reportError(ERROR_CODE.INVALID_TABLE,
      "table's replication scope is global: " + cleanReplicationBarrierTable);
    return;
  }
  List<byte[]> regionNames = new ArrayList<>();
  Scan barrierScan = new Scan();
  barrierScan.setCaching(100);
  barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
  barrierScan
    .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable,
      ClientMetaTableAccessor.QueryType.REGION))
    .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable,
      ClientMetaTableAccessor.QueryType.REGION));
  Result result;
  try (ResultScanner scanner = meta.getScanner(barrierScan)) {
    while ((result = scanner.next()) != null) {
      regionNames.add(result.getRow());
    }
  }
  if (regionNames.size() <= 0) {
    errors.reportError(ERROR_CODE.INVALID_TABLE,
      "there are no barriers for this table: " + cleanReplicationBarrierTable);
    return;
  }
  ReplicationQueueStorage queueStorage =
    ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf());
  List<ReplicationPeerDescription> peerDescriptions = admin.listReplicationPeers();
  if (peerDescriptions != null && peerDescriptions.size() > 0) {
    List<String> peers = peerDescriptions.stream()
      .filter(
        peerConfig -> peerConfig.getPeerConfig().needToReplicate(cleanReplicationBarrierTable))
      .map(peerConfig -> peerConfig.getPeerId()).collect(Collectors.toList());
    try {
      List<String> batch = new ArrayList<>();
      for (String peer : peers) {
        for (byte[] regionName : regionNames) {
          batch.add(RegionInfo.encodeRegionName(regionName));
          if (batch.size() % 100 == 0) {
            queueStorage.removeLastSequenceIds(peer, batch);
            batch.clear();
          }
        }
        if (batch.size() > 0) {
          queueStorage.removeLastSequenceIds(peer, batch);
          batch.clear();
        }
      }
    } catch (ReplicationException re) {
      throw new IOException(re);
    }
  }
  for (byte[] regionName : regionNames) {
    meta.delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY));
  }
  setShouldRerun();
}
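The inner loop above uses a flush-at-100 batching idiom, including the easy-to-forget trailing flush for a partial batch. Extracted as a generic helper for clarity (a sketch only; the Consumer-based shape is not part of HBase):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class Batching {
  static <T> void inBatches(Iterable<T> items, int batchSize, Consumer<List<T>> flush) {
    List<T> batch = new ArrayList<>();
    for (T item : items) {
      batch.add(item);
      if (batch.size() == batchSize) {
        flush.accept(batch);
        batch.clear();
      }
    }
    if (!batch.isEmpty()) {
      flush.accept(batch); // the trailing partial batch
    }
  }
}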