Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
From the class ReplicationSourceManager, method recordLog:
/**
 * Check and enqueue the given log to the correct source. If there's still no source for the
 * group to which the given log belongs, create one.
 * @param logPath the log path to check and enqueue
 * @throws IOException if the log cannot be added to a replication queue
 */
private void recordLog(Path logPath) throws IOException {
  String logName = logPath.getName();
  String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(logName);
  // Synchronize on replicationPeers to avoid adding a source for a to-be-removed peer.
  synchronized (replicationPeers) {
    for (String id : replicationPeers.getConnectedPeerIds()) {
      try {
        this.replicationQueues.addLog(id, logName);
      } catch (ReplicationException e) {
        throw new IOException("Cannot add log to replication queue"
            + " when creating a new source, queueId=" + id + ", filename=" + logName, e);
      }
    }
  }
  // Update the walsById map.
  synchronized (walsById) {
    for (Map.Entry<String, Map<String, SortedSet<String>>> entry : this.walsById.entrySet()) {
      String peerId = entry.getKey();
      Map<String, SortedSet<String>> walsByPrefix = entry.getValue();
      boolean existingPrefix = false;
      for (Map.Entry<String, SortedSet<String>> walsEntry : walsByPrefix.entrySet()) {
        SortedSet<String> wals = walsEntry.getValue();
        if (this.sources.isEmpty()) {
          // If there are no slaves, there is no need to keep the old WALs, since
          // only the last one is considered when a new slave comes in.
          wals.clear();
        }
        if (logPrefix.equals(walsEntry.getKey())) {
          wals.add(logName);
          existingPrefix = true;
        }
      }
      if (!existingPrefix) {
        // The new log belongs to a new group; add it to this peer.
        LOG.debug("Start tracking logs for wal group " + logPrefix + " for peer " + peerId);
        SortedSet<String> wals = new TreeSet<>();
        wals.add(logName);
        walsByPrefix.put(logPrefix, wals);
      }
    }
  }
}
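For readers unfamiliar with the bookkeeping above, here is a minimal, self-contained sketch of the walsById structure that recordLog maintains. The peer id, WAL group prefix, and WAL name are illustrative placeholders, not values taken from HBase:

import java.util.Map;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class WalsByIdSketch {
  public static void main(String[] args) {
    // walsById maps peer id -> WAL group prefix -> sorted set of WAL names,
    // mirroring the Map<String, Map<String, SortedSet<String>>> iterated above.
    Map<String, Map<String, SortedSet<String>>> walsById = new TreeMap<>();
    String peerId = "1"; // illustrative peer id
    String logPrefix = "host%2C16020%2C1510000000000"; // illustrative WAL group prefix
    walsById.computeIfAbsent(peerId, k -> new TreeMap<>())
        .computeIfAbsent(logPrefix, k -> new TreeSet<>())
        .add(logPrefix + ".1510000000001"); // WAL name = prefix + timestamp suffix
    System.out.println(walsById);
  }
}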
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
From the class Replication, method startReplicationService:
/**
 * If replication is enabled and this cluster is a master, this starts the replication
 * manager, creates the replication sink, and schedules the statistics thread.
 * @throws IOException if the replication manager cannot be initialized
 */
public void startReplicationService() throws IOException {
  try {
    this.replicationManager.init();
  } catch (ReplicationException e) {
    throw new IOException(e);
  }
  this.replicationSink = new ReplicationSink(this.conf, this.server);
  this.scheduleThreadPool.scheduleAtFixedRate(
      new ReplicationStatisticsThread(this.replicationSink, this.replicationManager),
      statsThreadPeriod, statsThreadPeriod, TimeUnit.SECONDS);
}
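The scheduling call at the end uses the same value for the initial delay and the period. A self-contained sketch of that pattern, with a plain Runnable standing in for ReplicationStatisticsThread and a hard-coded period where HBase reads its value from configuration:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class StatsSchedulingSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduleThreadPool = Executors.newSingleThreadScheduledExecutor();
    long statsThreadPeriod = 5; // seconds; placeholder for the configured value
    // Fixed-rate scheduling: first run after statsThreadPeriod, then every period.
    scheduleThreadPool.scheduleAtFixedRate(
        () -> System.out.println("replication stats tick"),
        statsThreadPeriod, statsThreadPeriod, TimeUnit.SECONDS);
    TimeUnit.SECONDS.sleep(12); // let the task fire a couple of times
    scheduleThreadPool.shutdownNow();
  }
}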
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
From the class ReplicationAdmin, method appendPeerTableCFs:
/**
 * Append the replicable table-cf config of the specified peer.
 * @param id a short name that identifies the cluster
 * @param tableCfs a map from table name to column family names
 * @throws ReplicationException if tableCfs is null
 * @throws IOException if the peer config cannot be read or updated
 */
@Deprecated
public void appendPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
    throws ReplicationException, IOException {
  if (tableCfs == null) {
    throw new ReplicationException("tableCfs is null");
  }
  ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(id);
  Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
  if (preTableCfs == null) {
    setPeerTableCFs(id, tableCfs);
    return;
  }
  for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
    TableName table = entry.getKey();
    Collection<String> appendCfs = entry.getValue();
    if (preTableCfs.containsKey(table)) {
      List<String> cfs = preTableCfs.get(table);
      if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
        // A null cf list means "replicate all column families" for this table.
        preTableCfs.put(table, null);
      } else {
        Set<String> cfSet = new HashSet<>(cfs);
        cfSet.addAll(appendCfs);
        preTableCfs.put(table, Lists.newArrayList(cfSet));
      }
    } else {
      if (appendCfs == null || appendCfs.isEmpty()) {
        preTableCfs.put(table, null);
      } else {
        preTableCfs.put(table, Lists.newArrayList(appendCfs));
      }
    }
  }
  updatePeerConfig(id, peerConfig);
}
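A hedged usage sketch of this method. The peer id and table name below are placeholders, and the try-with-resources assumes the deprecated ReplicationAdmin constructor that takes a Configuration:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class AppendPeerTableCfsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ReplicationAdmin admin = new ReplicationAdmin(conf)) {
      // Append cf1 and cf2 of table "demo" to peer "1" (placeholder names).
      Map<TableName, List<String>> tableCfs = new HashMap<>();
      tableCfs.put(TableName.valueOf("demo"), Arrays.asList("cf1", "cf2"));
      admin.appendPeerTableCFs("1", tableCfs);
      // Passing a null or empty cf list instead would switch the table to
      // "replicate all column families", per the branch logic above.
    }
  }
}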
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
From the class TestReplicationAdmin, method testRemovePeerTableCFs:
@Test
public void testRemovePeerTableCFs() throws Exception {
  ReplicationPeerConfig rpc1 = new ReplicationPeerConfig();
  rpc1.setClusterKey(KEY_ONE);
  final TableName tableName1 = TableName.valueOf(name.getMethodName() + "t1");
  final TableName tableName2 = TableName.valueOf(name.getMethodName() + "t2");
  final TableName tableName3 = TableName.valueOf(name.getMethodName() + "t3");
  final TableName tableName4 = TableName.valueOf(name.getMethodName() + "t4");
  // Add a valid peer
  admin.addPeer(ID_ONE, rpc1, null);
  Map<TableName, List<String>> tableCFs = new HashMap<>();
  try {
    tableCFs.put(tableName3, null);
    admin.removePeerTableCFs(ID_ONE, tableCFs);
    assertTrue(false);
  } catch (ReplicationException e) {
    // expected: the peer has no table-cf config yet
  }
  assertNull(admin.getPeerTableCFs(ID_ONE));
  tableCFs.clear();
  tableCFs.put(tableName1, null);
  tableCFs.put(tableName2, new ArrayList<>());
  tableCFs.get(tableName2).add("cf1");
  admin.setPeerTableCFs(ID_ONE, tableCFs);
  try {
    tableCFs.clear();
    tableCFs.put(tableName3, null);
    admin.removePeerTableCFs(ID_ONE, tableCFs);
    assertTrue(false);
  } catch (ReplicationException e) {
    // expected: t3 is not in the peer's table-cf config
  }
  Map<TableName, List<String>> result =
      ReplicationSerDeHelper.parseTableCFsFromConfig(admin.getPeerTableCFs(ID_ONE));
  assertEquals(2, result.size());
  assertTrue("Should contain t1", result.containsKey(tableName1));
  assertTrue("Should contain t2", result.containsKey(tableName2));
  assertNull(result.get(tableName1));
  assertEquals(1, result.get(tableName2).size());
  assertEquals("cf1", result.get(tableName2).get(0));
  try {
    tableCFs.clear();
    tableCFs.put(tableName1, new ArrayList<>());
    tableCFs.get(tableName1).add("f1");
    admin.removePeerTableCFs(ID_ONE, tableCFs);
    assertTrue(false);
  } catch (ReplicationException e) {
    // expected: t1 replicates all cfs, so removing a single cf must fail
  }
  tableCFs.clear();
  tableCFs.put(tableName1, null);
  admin.removePeerTableCFs(ID_ONE, tableCFs);
  result = ReplicationSerDeHelper.parseTableCFsFromConfig(admin.getPeerTableCFs(ID_ONE));
  assertEquals(1, result.size());
  assertEquals(1, result.get(tableName2).size());
  assertEquals("cf1", result.get(tableName2).get(0));
  try {
    tableCFs.clear();
    tableCFs.put(tableName2, null);
    admin.removePeerTableCFs(ID_ONE, tableCFs);
    assertTrue(false);
  } catch (ReplicationException e) {
    // expected: t2 replicates only cf1, so an all-cf (null) removal must fail
  }
  tableCFs.clear();
  tableCFs.put(tableName2, new ArrayList<>());
  tableCFs.get(tableName2).add("cf1");
  admin.removePeerTableCFs(ID_ONE, tableCFs);
  assertNull(admin.getPeerTableCFs(ID_ONE));
  tableCFs.clear();
  tableCFs.put(tableName4, new ArrayList<>());
  admin.setPeerTableCFs(ID_ONE, tableCFs);
  admin.removePeerTableCFs(ID_ONE, tableCFs);
  assertNull(admin.getPeerTableCFs(ID_ONE));
  admin.removePeer(ID_ONE);
}
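The failing branches above illustrate the contract: removing a table or column family that the peer does not replicate (or removing with a mismatched cf scope) raises ReplicationException. A minimal sketch of a caller that treats such a mismatch as non-fatal; the peer id, table name, and helper name are placeholders:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationException;

public class RemovePeerTableCfsSketch {
  // 'admin' stands for an initialized ReplicationAdmin, as in the test above.
  static void removeQuietly(ReplicationAdmin admin) throws Exception {
    Map<TableName, List<String>> toRemove = new HashMap<>();
    toRemove.put(TableName.valueOf("demo"), null); // null = the whole table
    try {
      admin.removePeerTableCFs("1", toRemove);
    } catch (ReplicationException e) {
      // The peer does not replicate "demo" as specified; the caller decides
      // whether this counts as an error.
    }
  }
}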
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
From the class VerifyReplication, method getPeerQuorumConfig:
private static Pair<ReplicationPeerConfig, Configuration> getPeerQuorumConfig(
    final Configuration conf) throws IOException {
  ZooKeeperWatcher localZKW = null;
  ReplicationPeerZKImpl peer = null;
  try {
    localZKW = new ZooKeeperWatcher(conf, "VerifyReplication", new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
        // no-op: failures surface as exceptions below
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    });
    ReplicationPeers rp = ReplicationFactory.getReplicationPeers(localZKW, conf, localZKW);
    rp.init();
    Pair<ReplicationPeerConfig, Configuration> pair = rp.getPeerConf(peerId);
    if (pair == null) {
      throw new IOException("Couldn't get peer conf!");
    }
    return pair;
  } catch (ReplicationException e) {
    throw new IOException(
        "An error occurred while trying to connect to the remote peer cluster", e);
  } finally {
    if (peer != null) {
      peer.close();
    }
    if (localZKW != null) {
      localZKW.close();
    }
  }
}
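The return type is HBase's simple two-field tuple, org.apache.hadoop.hbase.util.Pair, read via getFirst()/getSecond(). A self-contained sketch of that shape, with a plain String standing in for the ReplicationPeerConfig half:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Pair;

public class PeerPairSketch {
  public static void main(String[] args) {
    // getPeerQuorumConfig pairs the peer's ReplicationPeerConfig with a
    // Configuration pointing at the peer cluster; placeholders stand in here.
    Pair<String, Configuration> pair =
        new Pair<>("zk1:2181:/hbase", HBaseConfiguration.create());
    System.out.println("cluster key: " + pair.getFirst());
    System.out.println("peer conf: " + pair.getSecond());
  }
}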