Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
The class ReplicationAdmin, method removePeerTableCFs.
  /**
   * Remove some table-cfs from the table-cfs config of the specified peer.
   * @param id a short name that identifies the cluster
   * @param tableCfs a map from table name to column family names
   * @throws ReplicationException
   * @throws IOException
   * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#removeReplicationPeerTableCFs(String, Map)} instead
   */
  @Deprecated
  public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
      throws ReplicationException, IOException {
    if (tableCfs == null) {
      throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
      throw new ReplicationException("Table-Cfs for peer " + id + " is null");
    }
    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
      TableName table = entry.getKey();
      Collection<String> removeCfs = entry.getValue();
      if (preTableCfs.containsKey(table)) {
        List<String> cfs = preTableCfs.get(table);
        if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
          // Both sides mean "the whole table": drop the table from the config.
          preTableCfs.remove(table);
        } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
          // Remove only the requested column families; drop the table if none remain.
          Set<String> cfSet = new HashSet<>(cfs);
          cfSet.removeAll(removeCfs);
          if (cfSet.isEmpty()) {
            preTableCfs.remove(table);
          } else {
            preTableCfs.put(table, Lists.newArrayList(cfSet));
          }
        } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) {
          throw new ReplicationException("Cannot remove cf of table: " + table
              + " which doesn't specify cfs from table-cfs config in peer: " + id);
        } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) {
          throw new ReplicationException("Cannot remove table: " + table
              + " which has specified cfs from table-cfs config in peer: " + id);
        }
      } else {
        throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
      }
    }
    updatePeerConfig(id, peerConfig);
  }
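A minimal usage sketch of this deprecated client path. The peer id "1", table "orders", and column family "cf1" below are hypothetical, and the cluster Configuration is assumed to come from the local classpath:

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class RemovePeerTableCfsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
      // Hypothetical names: stop replicating column family "cf1" of table "orders" to peer "1".
      Map<TableName, Collection<String>> tableCfs = new HashMap<>();
      tableCfs.put(TableName.valueOf("orders"), Arrays.asList("cf1"));
      replicationAdmin.removePeerTableCFs("1", tableCfs);
    }
  }
}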
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
The class ReplicationAdmin, method listReplicationPeers.
  /**
   * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicationPeers()} instead
   */
  @VisibleForTesting
  @Deprecated
  List<ReplicationPeer> listReplicationPeers() throws IOException {
    Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
    if (peers == null || peers.size() <= 0) {
      return null;
    }
    List<ReplicationPeer> listOfPeers = new ArrayList<>(peers.size());
    for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
      String peerId = peerEntry.getKey();
      try {
        Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf(peerId);
        Configuration peerConf = pair.getSecond();
        ReplicationPeer peer =
            new ReplicationPeerZKImpl(zkw, peerConf, peerId, pair.getFirst(), this.connection);
        listOfPeers.add(peer);
      } catch (ReplicationException e) {
        // Skip peers whose configuration cannot be read; keep listing the rest.
        LOG.warn("Failed to get valid replication peers. "
            + "Error connecting to peer cluster with peerId=" + peerId
            + ". Error message=" + e.getMessage());
        LOG.debug("Failure details to get valid replication peers.", e);
        continue;
      }
    }
    return listOfPeers;
  }
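The javadoc above points callers at Admin#listReplicationPeers() instead. A minimal sketch of that replacement path, assuming an HBase 2.x client where listReplicationPeers() returns ReplicationPeerDescription entries:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListReplicationPeersExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Each descriptor carries the peer id and its ReplicationPeerConfig.
      List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
      for (ReplicationPeerDescription peer : peers) {
        System.out.println(peer.getPeerId() + " -> " + peer.getPeerConfig());
      }
    }
  }
}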
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
The class MasterRpcServices, method getReplicationPeerConfig.
  @Override
  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
      GetReplicationPeerConfigRequest request) throws ServiceException {
    GetReplicationPeerConfigResponse.Builder response = GetReplicationPeerConfigResponse.newBuilder();
    try {
      String peerId = request.getPeerId();
      ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
      response.setPeerId(peerId);
      response.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }
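On the client side, this master RPC is typically reached through Admin#getReplicationPeerConfig(String). A minimal sketch, with a hypothetical peer id "1":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class GetPeerConfigExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Hypothetical peer id; the master answers this through the RPC handler above.
      ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig("1");
      System.out.println(peerConfig.getClusterKey());
    }
  }
}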
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
The class HBaseAdmin, method removeReplicationPeerTableCFs.
  @Override
  public void removeReplicationPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
      throws ReplicationException, IOException {
    if (tableCfs == null) {
      throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
      throw new ReplicationException("Table-Cfs for peer " + id + " is null");
    }
    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
      TableName table = entry.getKey();
      Collection<String> removeCfs = entry.getValue();
      if (preTableCfs.containsKey(table)) {
        List<String> cfs = preTableCfs.get(table);
        if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
          preTableCfs.remove(table);
        } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
          Set<String> cfSet = new HashSet<String>(cfs);
          cfSet.removeAll(removeCfs);
          if (cfSet.isEmpty()) {
            preTableCfs.remove(table);
          } else {
            preTableCfs.put(table, Lists.newArrayList(cfSet));
          }
        } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) {
          throw new ReplicationException("Cannot remove cf of table: " + table
              + " which doesn't specify cfs from table-cfs config in peer: " + id);
        } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) {
          throw new ReplicationException("Cannot remove table: " + table
              + " which has specified cfs from table-cfs config in peer: " + id);
        }
      } else {
        throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
      }
    }
    updateReplicationPeerConfig(id, peerConfig);
  }
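A minimal usage sketch of this Admin API. The peer id "1", table "orders", and column family "cf1" are hypothetical names:

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RemoveReplicationPeerTableCfsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Hypothetical names: stop replicating column family "cf1" of table "orders" to peer "1".
      Map<TableName, Collection<String>> tableCfs = new HashMap<>();
      tableCfs.put(TableName.valueOf("orders"), Arrays.asList("cf1"));
      admin.removeReplicationPeerTableCFs("1", tableCfs);
    }
  }
}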
Use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.
The class HBaseAdmin, method appendReplicationPeerTableCFs.
  @Override
  public void appendReplicationPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
      throws ReplicationException, IOException {
    if (tableCfs == null) {
      throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
      // No existing table-cfs config: take the requested map as-is.
      peerConfig.setTableCFsMap(tableCfs);
    } else {
      for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
        TableName table = entry.getKey();
        Collection<String> appendCfs = entry.getValue();
        if (preTableCfs.containsKey(table)) {
          List<String> cfs = preTableCfs.get(table);
          if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
            // A null or empty cf list on either side widens the entry to the whole table.
            preTableCfs.put(table, null);
          } else {
            Set<String> cfSet = new HashSet<String>(cfs);
            cfSet.addAll(appendCfs);
            preTableCfs.put(table, Lists.newArrayList(cfSet));
          }
        } else {
          if (appendCfs == null || appendCfs.isEmpty()) {
            preTableCfs.put(table, null);
          } else {
            preTableCfs.put(table, Lists.newArrayList(appendCfs));
          }
        }
      }
    }
    updateReplicationPeerConfig(id, peerConfig);
  }
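A minimal usage sketch of the append path, showing both a specific column family and a whole-table entry. The peer id and table names are hypothetical:

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AppendReplicationPeerTableCfsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      Map<TableName, Collection<String>> tableCfs = new HashMap<>();
      // Hypothetical names: also replicate column family "cf2" of table "orders" to peer "1".
      tableCfs.put(TableName.valueOf("orders"), Arrays.asList("cf2"));
      // A null collection widens the entry to the whole table, per the merge logic above.
      tableCfs.put(TableName.valueOf("users"), null);
      admin.appendReplicationPeerTableCFs("1", tableCfs);
    }
  }
}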