
Example 6 with ReplicationException

use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.

the class ReplicationAdmin method removePeerTableCFs.

/**
   * Remove some table-cfs from config of the specified peer
   * @param id a short name that identifies the cluster
   * @param tableCfs A map from tableName to column family names
   * @throws ReplicationException if the peer's table-cfs config cannot be read or updated
   * @throws IOException if a remote or network exception occurs
   */
@Deprecated
public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException, IOException {
    if (tableCfs == null) {
        throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
        throw new ReplicationException("Table-Cfs for peer" + id + " is null");
    }
    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
        TableName table = entry.getKey();
        Collection<String> removeCfs = entry.getValue();
        if (preTableCfs.containsKey(table)) {
            List<String> cfs = preTableCfs.get(table);
            if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
                preTableCfs.remove(table);
            } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
                Set<String> cfSet = new HashSet<>(cfs);
                cfSet.removeAll(removeCfs);
                if (cfSet.isEmpty()) {
                    preTableCfs.remove(table);
                } else {
                    preTableCfs.put(table, Lists.newArrayList(cfSet));
                }
            } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) {
                throw new ReplicationException("Cannot remove cf of table: " + table + " which doesn't specify cfs from table-cfs config in peer: " + id);
            } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) {
                throw new ReplicationException("Cannot remove table: " + table + " which has specified cfs from table-cfs config in peer: " + id);
            }
        } else {
            throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
        }
    }
    updatePeerConfig(id, peerConfig);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) HashSet(java.util.HashSet) Set(java.util.Set) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
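
For reference, a minimal caller-side sketch of the deprecated ReplicationAdmin#removePeerTableCFs shown above. The peer id "1", table "t1", and column family "cf1" are hypothetical placeholders, not values from this project.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class RemovePeerTableCfsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ReplicationAdmin is Closeable; try-with-resources releases its connection.
        try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
            // Stop replicating column family "cf1" of table "t1" to peer "1" (placeholder names).
            Map<TableName, List<String>> tableCfs = new HashMap<>();
            tableCfs.put(TableName.valueOf("t1"), Arrays.asList("cf1"));
            replicationAdmin.removePeerTableCFs("1", tableCfs);
        }
    }
}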

Example 7 with ReplicationException

use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.

the class ReplicationAdmin method listReplicationPeers.

/**
   * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicationPeers()} instead
   */
@VisibleForTesting
@Deprecated
List<ReplicationPeer> listReplicationPeers() throws IOException {
    Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
    if (peers == null || peers.size() <= 0) {
        return null;
    }
    List<ReplicationPeer> listOfPeers = new ArrayList<>(peers.size());
    for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
        String peerId = peerEntry.getKey();
        try {
            Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf(peerId);
            Configuration peerConf = pair.getSecond();
            ReplicationPeer peer = new ReplicationPeerZKImpl(zkw, peerConf, peerId, pair.getFirst(), this.connection);
            listOfPeers.add(peer);
        } catch (ReplicationException e) {
            LOG.warn("Failed to get valid replication peers. " + "Error connecting to peer cluster with peerId=" + peerId + ". Error message=" + e.getMessage());
            LOG.debug("Failure details to get valid replication peers.", e);
            continue;
        }
    }
    return listOfPeers;
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) ReplicationPeerZKImpl(org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) ReplicationPeer(org.apache.hadoop.hbase.replication.ReplicationPeer) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
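
The method above is @VisibleForTesting and deprecated; regular callers list peers via listPeerConfigs(), the same call it makes on its first line. A minimal sketch, assuming a reachable cluster configuration:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ListPeerConfigsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
            // listPeerConfigs() returns peer id -> ReplicationPeerConfig, as used in the example above.
            Map<String, ReplicationPeerConfig> peers = replicationAdmin.listPeerConfigs();
            for (Map.Entry<String, ReplicationPeerConfig> entry : peers.entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue().getClusterKey());
            }
        }
    }
}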

Example 8 with ReplicationException

use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.

the class MasterRpcServices method getReplicationPeerConfig.

@Override
public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller, GetReplicationPeerConfigRequest request) throws ServiceException {
    GetReplicationPeerConfigResponse.Builder response = GetReplicationPeerConfigResponse.newBuilder();
    try {
        String peerId = request.getPeerId();
        ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
        response.setPeerId(peerId);
        response.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
    } catch (ReplicationException | IOException e) {
        throw new ServiceException(e);
    }
    return response.build();
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) GetReplicationPeerConfigResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)
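
On the client side this RPC is reached through Admin#getReplicationPeerConfig; the ServiceException thrown above surfaces there as an IOException. A minimal client-side sketch, assuming the HBase 2.x Admin API and a placeholder peer id "1":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class GetPeerConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Failures wrapped into ServiceException on the master arrive here as IOException.
            ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig("1");
            System.out.println("Cluster key for peer 1: " + peerConfig.getClusterKey());
        }
    }
}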

Example 9 with ReplicationException

use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.

the class HBaseAdmin method removeReplicationPeerTableCFs.

@Override
public void removeReplicationPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException, IOException {
    if (tableCfs == null) {
        throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
        throw new ReplicationException("Table-Cfs for peer" + id + " is null");
    }
    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
        TableName table = entry.getKey();
        Collection<String> removeCfs = entry.getValue();
        if (preTableCfs.containsKey(table)) {
            List<String> cfs = preTableCfs.get(table);
            if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
                preTableCfs.remove(table);
            } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
                Set<String> cfSet = new HashSet<String>(cfs);
                cfSet.removeAll(removeCfs);
                if (cfSet.isEmpty()) {
                    preTableCfs.remove(table);
                } else {
                    preTableCfs.put(table, Lists.newArrayList(cfSet));
                }
            } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) {
                throw new ReplicationException("Cannot remove cf of table: " + table + " which doesn't specify cfs from table-cfs config in peer: " + id);
            } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) {
                throw new ReplicationException("Cannot remove table: " + table + " which has specified cfs from table-cfs config in peer: " + id);
            }
        } else {
            throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
        }
    }
    updateReplicationPeerConfig(id, peerConfig);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) Set(java.util.Set) HashSet(java.util.HashSet) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap)
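
A minimal caller-side sketch of this method through the Admin interface (the signature is taken from the @Override above). The peer id "1", table "t1", and column family "cf1" are placeholders:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RemoveReplicationPeerTableCfsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Remove "cf1" of "t1" from the set replicated to peer "1" (placeholder names).
            // Throws ReplicationException if the table or cf is not in the peer's table-cfs config.
            Map<TableName, List<String>> tableCfs = new HashMap<>();
            tableCfs.put(TableName.valueOf("t1"), Arrays.asList("cf1"));
            admin.removeReplicationPeerTableCFs("1", tableCfs);
        }
    }
}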

Example 10 with ReplicationException

use of org.apache.hadoop.hbase.replication.ReplicationException in project hbase by apache.

the class HBaseAdmin method appendReplicationPeerTableCFs.

@Override
public void appendReplicationPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException, IOException {
    if (tableCfs == null) {
        throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
        peerConfig.setTableCFsMap(tableCfs);
    } else {
        for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
            TableName table = entry.getKey();
            Collection<String> appendCfs = entry.getValue();
            if (preTableCfs.containsKey(table)) {
                List<String> cfs = preTableCfs.get(table);
                if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
                    preTableCfs.put(table, null);
                } else {
                    Set<String> cfSet = new HashSet<String>(cfs);
                    cfSet.addAll(appendCfs);
                    preTableCfs.put(table, Lists.newArrayList(cfSet));
                }
            } else {
                if (appendCfs == null || appendCfs.isEmpty()) {
                    preTableCfs.put(table, null);
                } else {
                    preTableCfs.put(table, Lists.newArrayList(appendCfs));
                }
            }
        }
    }
    updateReplicationPeerConfig(id, peerConfig);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap) HashSet(java.util.HashSet)
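
A matching caller-side sketch for appendReplicationPeerTableCFs. As the branches above show, passing a null or empty cf collection for a table widens replication to the whole table, while a non-empty collection is merged into the existing cf set. All names below are placeholders:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AppendReplicationPeerTableCfsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Additionally replicate "cf2" of table "t1" to peer "1" (placeholder names);
            // the new cf is merged with whatever cfs are already configured for "t1".
            Map<TableName, List<String>> tableCfs = new HashMap<>();
            tableCfs.put(TableName.valueOf("t1"), Arrays.asList("cf2"));
            admin.appendReplicationPeerTableCFs("1", tableCfs);
        }
    }
}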

Aggregations

ReplicationException (org.apache.hadoop.hbase.replication.ReplicationException) 11
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig) 9
HashMap (java.util.HashMap) 7
ArrayList (java.util.ArrayList) 6
List (java.util.List) 5
Map (java.util.Map) 5
TableName (org.apache.hadoop.hbase.TableName) 5
IOException (java.io.IOException) 4
HashSet (java.util.HashSet) 4
TreeMap (java.util.TreeMap) 4
LinkedList (java.util.LinkedList) 2
Set (java.util.Set) 2
SortedSet (java.util.SortedSet) 2
TreeSet (java.util.TreeSet) 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 2
Configuration (org.apache.hadoop.conf.Configuration) 2
ReplicationPeer (org.apache.hadoop.hbase.replication.ReplicationPeer) 2
ReplicationPeerZKImpl (org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
Path (org.apache.hadoop.fs.Path) 1