Example 36 with ReplicationPeerConfig

Use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.

In the class HBaseAdmin, the method needToReplicate:

/**
   * Decide whether the table needs to be replicated to the peer cluster according to the
   * peer config.
   * @param table name of the table
   * @param peer description of the replication peer, including its config
   * @return true if the table needs to be replicated to the peer cluster
   */
private boolean needToReplicate(TableName table, ReplicationPeerDescription peer) {
    ReplicationPeerConfig peerConfig = peer.getPeerConfig();
    Set<String> namespaces = peerConfig.getNamespaces();
    Map<TableName, List<String>> tableCFsMap = peerConfig.getTableCFsMap();
    // If neither namespaces nor table-CFs are configured, the peer has no filter,
    // so all the tables' data are applicable for replication
    if (namespaces == null && tableCFsMap == null) {
        return true;
    }
    if (namespaces != null && namespaces.contains(table.getNamespaceAsString())) {
        return true;
    }
    if (tableCFsMap != null && tableCFsMap.containsKey(table)) {
        return true;
    }
    LOG.debug("Table " + table.getNameAsString() + " doesn't need replicate to peer cluster, peerId=" + peer.getPeerId() + ", clusterKey=" + peerConfig.getClusterKey());
    return false;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList)
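
For context, a minimal sketch of applying the same check from client code, assuming the peer descriptions come from Admin#listReplicationPeers(); the class and helper names here (PeerTableFilter, shouldReplicate) are made up for illustration.

import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class PeerTableFilter {
    // Mirrors needToReplicate above: a table is replicated to a peer when the peer
    // has no namespace/table-CFs filter, when its namespace is listed, or when the
    // table itself appears in the table-CFs map.
    static boolean shouldReplicate(TableName table, ReplicationPeerDescription peer) {
        ReplicationPeerConfig peerConfig = peer.getPeerConfig();
        Set<String> namespaces = peerConfig.getNamespaces();
        Map<TableName, List<String>> tableCFsMap = peerConfig.getTableCFsMap();
        if (namespaces == null && tableCFsMap == null) {
            return true;
        }
        if (namespaces != null && namespaces.contains(table.getNamespaceAsString())) {
            return true;
        }
        return tableCFsMap != null && tableCFsMap.containsKey(table);
    }
}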

Example 37 with ReplicationPeerConfig

Use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.

In the class HBaseAdmin, the method removeReplicationPeerTableCFs:

@Override
public void removeReplicationPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException, IOException {
    if (tableCfs == null) {
        throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
        throw new ReplicationException("Table-Cfs for peer " + id + " is null");
    }
    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
        TableName table = entry.getKey();
        Collection<String> removeCfs = entry.getValue();
        if (preTableCfs.containsKey(table)) {
            List<String> cfs = preTableCfs.get(table);
            if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
                preTableCfs.remove(table);
            } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
                Set<String> cfSet = new HashSet<String>(cfs);
                cfSet.removeAll(removeCfs);
                if (cfSet.isEmpty()) {
                    preTableCfs.remove(table);
                } else {
                    preTableCfs.put(table, Lists.newArrayList(cfSet));
                }
            } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) {
                throw new ReplicationException("Cannot remove cf of table: " + table + " which doesn't specify cfs from table-cfs config in peer: " + id);
            } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) {
                throw new ReplicationException("Cannot remove table: " + table + " which has specified cfs from table-cfs config in peer: " + id);
            }
        } else {
            throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
        }
    }
    updateReplicationPeerConfig(id, peerConfig);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) Set(java.util.Set) HashSet(java.util.HashSet) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap)
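
A hedged usage sketch for the method above, assuming a running cluster with an existing replication peer "2" whose table-CFs map already lists table t1; the table and column-family names are placeholders.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RemoveTableCfsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Stop replicating column family cf1 of table t1 to peer "2".
            Map<TableName, List<String>> toRemove = new HashMap<>();
            toRemove.put(TableName.valueOf("t1"), Arrays.asList("cf1"));
            admin.removeReplicationPeerTableCFs("2", toRemove);
        }
    }
}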

Example 38 with ReplicationPeerConfig

Use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.

In the class HBaseAdmin, the method appendReplicationPeerTableCFs:

@Override
public void appendReplicationPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException, IOException {
    if (tableCfs == null) {
        throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
    if (preTableCfs == null) {
        peerConfig.setTableCFsMap(tableCfs);
    } else {
        for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
            TableName table = entry.getKey();
            Collection<String> appendCfs = entry.getValue();
            if (preTableCfs.containsKey(table)) {
                List<String> cfs = preTableCfs.get(table);
                if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
                    preTableCfs.put(table, null);
                } else {
                    Set<String> cfSet = new HashSet<String>(cfs);
                    cfSet.addAll(appendCfs);
                    preTableCfs.put(table, Lists.newArrayList(cfSet));
                }
            } else {
                if (appendCfs == null || appendCfs.isEmpty()) {
                    preTableCfs.put(table, null);
                } else {
                    preTableCfs.put(table, Lists.newArrayList(appendCfs));
                }
            }
        }
    }
    updateReplicationPeerConfig(id, peerConfig);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap) HashSet(java.util.HashSet)
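
A short usage sketch, assuming an open Admin handle and an existing peer "2"; note that, per the code above, a null column-family list for a table means all of that table's families. Table and family names are illustrative.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class AppendTableCfsExample {
    static void appendTables(Admin admin) throws Exception {
        Map<TableName, List<String>> toAppend = new HashMap<>();
        // Replicate only cf1 and cf2 of t1 to peer "2".
        toAppend.put(TableName.valueOf("t1"), Arrays.asList("cf1", "cf2"));
        // A null list means every column family of t2.
        toAppend.put(TableName.valueOf("t2"), null);
        admin.appendReplicationPeerTableCFs("2", toAppend);
    }
}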

Example 39 with ReplicationPeerConfig

Use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.

In the class TestReplicaWithCluster, the method testReplicaAndReplication:

@SuppressWarnings("deprecation")
@Test(timeout = 300000)
public void testReplicaAndReplication() throws Exception {
    HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaAndReplication");
    hdt.setRegionReplication(NB_SERVERS);
    HColumnDescriptor fam = new HColumnDescriptor(row);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    hdt.addFamily(fam);
    hdt.addCoprocessor(SlowMeCopro.class.getName());
    HTU.getAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration());
    conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    MiniZooKeeperCluster miniZK = HTU.getZkCluster();
    HTU2 = new HBaseTestingUtility(conf2);
    HTU2.setZkCluster(miniZK);
    HTU2.startMiniCluster(NB_SERVERS);
    LOG.info("Setup second Zk");
    HTU2.getAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey(HTU2.getClusterKey());
    admin.addPeer("2", rpc, null);
    admin.close();
    Put p = new Put(row);
    p.addColumn(row, row, row);
    final Table table = HTU.getConnection().getTable(hdt.getTableName());
    table.put(p);
    HTU.getAdmin().flush(table.getName());
    LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");
    Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            try {
                SlowMeCopro.cdl.set(new CountDownLatch(1));
                Get g = new Get(row);
                g.setConsistency(Consistency.TIMELINE);
                Result r = table.get(g);
                Assert.assertTrue(r.isStale());
                return !r.isEmpty();
            } finally {
                SlowMeCopro.cdl.get().countDown();
                SlowMeCopro.sleepTime.set(0);
            }
        }
    });
    table.close();
    LOG.info("stale get on the first cluster done. Now for the second.");
    final Table table2 = HTU.getConnection().getTable(hdt.getTableName());
    Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            try {
                SlowMeCopro.cdl.set(new CountDownLatch(1));
                Get g = new Get(row);
                g.setConsistency(Consistency.TIMELINE);
                Result r = table2.get(g);
                Assert.assertTrue(r.isStale());
                return !r.isEmpty();
            } finally {
                SlowMeCopro.cdl.get().countDown();
                SlowMeCopro.sleepTime.set(0);
            }
        }
    });
    table2.close();
    HTU.getAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
    HTU2.getAdmin().disableTable(hdt.getTableName());
    HTU2.deleteTable(hdt.getTableName());
// We shut down the HTU2 minicluster later, in afterClass(), as shutting down
// the minicluster has the negative side effect of deleting all HConnections in the JVM.
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) CountDownLatch(java.util.concurrent.CountDownLatch) IOException(java.io.IOException) RegionServerStoppedException(org.apache.hadoop.hbase.regionserver.RegionServerStoppedException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)
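
The test registers the peer through the deprecated ReplicationAdmin; a minimal sketch of the equivalent setup through Admin#addReplicationPeer (assuming an HBase version that exposes it) looks like the following. The cluster key is a placeholder for the second cluster's ZooKeeper quorum, client port, and znode parent.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            ReplicationPeerConfig rpc = new ReplicationPeerConfig();
            // Placeholder cluster key: "<zk quorum>:<zk client port>:<znode parent>".
            rpc.setClusterKey("peer-zk-host:2181:/2");
            admin.addReplicationPeer("2", rpc);
        }
    }
}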

Example 40 with ReplicationPeerConfig

Use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.

In the class TableCFsUpdater, the method update:

public boolean update(String peerId) {
    String tableCFsNode = getTableCFsNode(peerId);
    try {
        if (ZKUtil.checkExists(zookeeper, tableCFsNode) != -1) {
            String peerNode = getPeerNode(peerId);
            ReplicationPeerConfig rpc = getReplicationPeerConig(peerNode);
            // We only need to copy data from the tableCFs node to the rpc node the first time HMaster starts.
            if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().isEmpty()) {
                // Copy the legacy tableCFs node into the peer node's config
                LOG.info("Copying tableCFs into peerNode: " + peerId);
                ReplicationProtos.TableCF[] tableCFs = ReplicationSerDeHelper.parseTableCFs(ZKUtil.getData(this.zookeeper, tableCFsNode));
                if (tableCFs != null && tableCFs.length > 0) {
                    rpc.setTableCFsMap(ReplicationSerDeHelper.convert2Map(tableCFs));
                    ZKUtil.setData(this.zookeeper, peerNode, ReplicationSerDeHelper.toByteArray(rpc));
                }
            } else {
                LOG.info("No tableCFs in peerNode:" + peerId);
            }
        }
    } catch (KeeperException | InterruptedException | IOException e) {
        LOG.warn("NOTICE!! Update peerId failed, peerId=" + peerId, e);
        return false;
    }
    return true;
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) IOException(java.io.IOException) KeeperException(org.apache.zookeeper.KeeperException)
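
For orientation, a minimal sketch of the structured form the updater writes: the legacy tableCFs child znode becomes the table-CFs map inside ReplicationPeerConfig. The cluster key, table, and family names below are placeholders.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigTableCfsSketch {
    static ReplicationPeerConfig buildConfig(String clusterKey) {
        ReplicationPeerConfig rpc = new ReplicationPeerConfig();
        rpc.setClusterKey(clusterKey);
        Map<TableName, List<String>> tableCfs = new HashMap<>();
        // Replicate only cf1 of t1; a null list means all families of t2.
        tableCfs.put(TableName.valueOf("t1"), Arrays.asList("cf1"));
        tableCfs.put(TableName.valueOf("t2"), null);
        rpc.setTableCFsMap(tableCfs);
        return rpc;
    }
}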

Aggregations

ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 42 usages
Test (org.junit.Test): 18 usages
ReplicationException (org.apache.hadoop.hbase.replication.ReplicationException): 11 usages
IOException (java.io.IOException): 10 usages
ArrayList (java.util.ArrayList): 10 usages
List (java.util.List): 10 usages
TableName (org.apache.hadoop.hbase.TableName): 10 usages
HashMap (java.util.HashMap): 8 usages
HashSet (java.util.HashSet): 7 usages
Configuration (org.apache.hadoop.conf.Configuration): 7 usages
ReplicationAdmin (org.apache.hadoop.hbase.client.replication.ReplicationAdmin): 7 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6 usages
ReplicationPeerNotFoundException (org.apache.hadoop.hbase.ReplicationPeerNotFoundException): 6 usages
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 6 usages
TreeMap (java.util.TreeMap): 5 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 5 usages
HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 5 usages
Map (java.util.Map): 4 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 4 usages
ReplicationPeerDescription (org.apache.hadoop.hbase.replication.ReplicationPeerDescription): 4 usages