use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.
the class HBaseAdmin method needToReplicate.
/**
 * Decide whether the table needs to be replicated to the peer cluster according to the peer config.
 * @param table name of the table
 * @param peer description of the peer, including its config
 * @return true if the table needs to be replicated to the peer cluster
 */
private boolean needToReplicate(TableName table, ReplicationPeerDescription peer) {
  ReplicationPeerConfig peerConfig = peer.getPeerConfig();
  Set<String> namespaces = peerConfig.getNamespaces();
  Map<TableName, List<String>> tableCFsMap = peerConfig.getTableCFsMap();
  // Neither namespaces nor table-cfs are configured, so all tables are applicable for replication.
  if (namespaces == null && tableCFsMap == null) {
    return true;
  }
  if (namespaces != null && namespaces.contains(table.getNamespaceAsString())) {
    return true;
  }
  if (tableCFsMap != null && tableCFsMap.containsKey(table)) {
    return true;
  }
  LOG.debug("Table " + table.getNameAsString() + " doesn't need to be replicated to the peer cluster, peerId="
    + peer.getPeerId() + ", clusterKey=" + peerConfig.getClusterKey());
  return false;
}
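To make the decision above concrete, here is a minimal, hypothetical sketch of a peer config that restricts replication to one namespace plus one explicit table. The cluster key, namespace, and table names are illustrative only, and whether namespaces and table-cfs may be combined in a single peer config can vary by HBase version (the version of needToReplicate shown above consults both).

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerScopeSketch {
  public static void main(String[] args) {
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1,zk2,zk3:2181:/hbase"); // illustrative cluster key

    // Replicate every table in the "analytics" namespace ...
    Set<String> namespaces = new HashSet<>();
    namespaces.add("analytics");
    peerConfig.setNamespaces(namespaces);

    // ... plus "default:users" (a null CF list means all column families).
    Map<TableName, List<String>> tableCFs = new HashMap<>();
    tableCFs.put(TableName.valueOf("default", "users"), null);
    peerConfig.setTableCFsMap(tableCFs);

    // With this config, needToReplicate(...) returns true for any "analytics:*"
    // table and for "default:users", and false for every other table.
  }
}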
use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.
the class HBaseAdmin method removeReplicationPeerTableCFs.
@Override
public void removeReplicationPeerTableCFs(String id,
    Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException, IOException {
  if (tableCfs == null) {
    throw new ReplicationException("tableCfs is null");
  }
  ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
  Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
  if (preTableCfs == null) {
    throw new ReplicationException("Table-Cfs for peer " + id + " is null");
  }
  for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
    TableName table = entry.getKey();
    Collection<String> removeCfs = entry.getValue();
    if (preTableCfs.containsKey(table)) {
      List<String> cfs = preTableCfs.get(table);
      if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
        preTableCfs.remove(table);
      } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
        Set<String> cfSet = new HashSet<String>(cfs);
        cfSet.removeAll(removeCfs);
        if (cfSet.isEmpty()) {
          preTableCfs.remove(table);
        } else {
          preTableCfs.put(table, Lists.newArrayList(cfSet));
        }
      } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) {
        throw new ReplicationException("Cannot remove cf of table: " + table
          + " which doesn't specify cfs from table-cfs config in peer: " + id);
      } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) {
        throw new ReplicationException("Cannot remove table: " + table
          + " which has specified cfs from table-cfs config in peer: " + id);
      }
    } else {
      throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
    }
  }
  updateReplicationPeerConfig(id, peerConfig);
}
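For context, a hedged usage sketch of the method above through the Admin API: assuming peer "2" already maps "default:users" to [cf1, cf2] in its table-cfs config, removing "cf1" leaves [cf2]. The peer id, table, and family names are illustrative.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RemovePeerTableCFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed starting state: peer "2" maps "default:users" -> [cf1, cf2].
    Map<TableName, List<String>> toRemove = new HashMap<>();
    toRemove.put(TableName.valueOf("default", "users"), Arrays.asList("cf1"));
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After this call the peer's entry for "default:users" becomes [cf2];
      // removing the last remaining family would drop the table from the table-cfs map.
      admin.removeReplicationPeerTableCFs("2", toRemove);
    }
  }
}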
use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.
the class HBaseAdmin method appendReplicationPeerTableCFs.
@Override
public void appendReplicationPeerTableCFs(String id,
    Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException, IOException {
  if (tableCfs == null) {
    throw new ReplicationException("tableCfs is null");
  }
  ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
  Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
  if (preTableCfs == null) {
    peerConfig.setTableCFsMap(tableCfs);
  } else {
    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
      TableName table = entry.getKey();
      Collection<String> appendCfs = entry.getValue();
      if (preTableCfs.containsKey(table)) {
        List<String> cfs = preTableCfs.get(table);
        if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
          preTableCfs.put(table, null);
        } else {
          Set<String> cfSet = new HashSet<String>(cfs);
          cfSet.addAll(appendCfs);
          preTableCfs.put(table, Lists.newArrayList(cfSet));
        }
      } else {
        if (appendCfs == null || appendCfs.isEmpty()) {
          preTableCfs.put(table, null);
        } else {
          preTableCfs.put(table, Lists.newArrayList(appendCfs));
        }
      }
    }
  }
  updateReplicationPeerConfig(id, peerConfig);
}
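A similar hedged sketch for appending: an explicit family list adds those families, while a null (or empty) list is stored as null by the method above, meaning all column families of that table. Table, family, and peer names are illustrative.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AppendPeerTableCFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Map<TableName, List<String>> toAppend = new HashMap<>();
    // Add one explicit family for "default:users" ...
    toAppend.put(TableName.valueOf("default", "users"), Arrays.asList("cf3"));
    // ... and all families for "default:orders" (null list).
    toAppend.put(TableName.valueOf("default", "orders"), null);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.appendReplicationPeerTableCFs("2", toAppend);
    }
  }
}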
use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.
the class TestReplicaWithCluster method testReplicaAndReplication.
@SuppressWarnings("deprecation")
@Test(timeout = 300000)
public void testReplicaAndReplication() throws Exception {
  HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaAndReplication");
  hdt.setRegionReplication(NB_SERVERS);
  HColumnDescriptor fam = new HColumnDescriptor(row);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  hdt.addFamily(fam);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  HTU.getAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

  Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration());
  conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  MiniZooKeeperCluster miniZK = HTU.getZkCluster();
  HTU2 = new HBaseTestingUtility(conf2);
  HTU2.setZkCluster(miniZK);
  HTU2.startMiniCluster(NB_SERVERS);
  LOG.info("Setup second Zk");
  HTU2.getAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  ReplicationPeerConfig rpc = new ReplicationPeerConfig();
  rpc.setClusterKey(HTU2.getClusterKey());
  admin.addPeer("2", rpc, null);
  admin.close();

  Put p = new Put(row);
  p.addColumn(row, row, row);
  final Table table = HTU.getConnection().getTable(hdt.getTableName());
  table.put(p);
  HTU.getAdmin().flush(table.getName());
  LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");

  Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        SlowMeCopro.cdl.set(new CountDownLatch(1));
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table.get(g);
        Assert.assertTrue(r.isStale());
        return !r.isEmpty();
      } finally {
        SlowMeCopro.cdl.get().countDown();
        SlowMeCopro.sleepTime.set(0);
      }
    }
  });
  table.close();
  LOG.info("stale get on the first cluster done. Now for the second.");

  final Table table2 = HTU.getConnection().getTable(hdt.getTableName());
  Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        SlowMeCopro.cdl.set(new CountDownLatch(1));
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table2.get(g);
        Assert.assertTrue(r.isStale());
        return !r.isEmpty();
      } finally {
        SlowMeCopro.cdl.get().countDown();
        SlowMeCopro.sleepTime.set(0);
      }
    }
  });
  table2.close();

  HTU.getAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
  HTU2.getAdmin().disableTable(hdt.getTableName());
  HTU2.deleteTable(hdt.getTableName());
  // We shut down the HTU2 mini cluster later, in afterClass(), because shutting down
  // the mini cluster has the side effect of deleting all HConnections in the JVM.
}
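The test registers the peer through the deprecated ReplicationAdmin.addPeer API. On branches where Admin exposes replication-peer methods, a hypothetical equivalent way to register the second cluster as a peer would look like the sketch below; the cluster key is illustrative, whereas the test obtains it from HTU2.getClusterKey().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey("zk-host:2181:/2"); // illustrative; the test uses HTU2.getClusterKey()
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Non-deprecated counterpart of ReplicationAdmin.addPeer("2", rpc, null).
      admin.addReplicationPeer("2", rpc);
    }
  }
}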
use of org.apache.hadoop.hbase.replication.ReplicationPeerConfig in project hbase by apache.
the class TableCFsUpdater method update.
public boolean update(String peerId) {
  String tableCFsNode = getTableCFsNode(peerId);
  try {
    if (ZKUtil.checkExists(zookeeper, tableCFsNode) != -1) {
      String peerNode = getPeerNode(peerId);
      ReplicationPeerConfig rpc = getReplicationPeerConig(peerNode);
      // We only need to copy data from the tableCFs node into the rpc node the first time HMaster starts.
      if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().isEmpty()) {
        // Copy the TableCFs node into the peer node.
        LOG.info("copy tableCFs into peerNode:" + peerId);
        ReplicationProtos.TableCF[] tableCFs =
          ReplicationSerDeHelper.parseTableCFs(ZKUtil.getData(this.zookeeper, tableCFsNode));
        if (tableCFs != null && tableCFs.length > 0) {
          rpc.setTableCFsMap(ReplicationSerDeHelper.convert2Map(tableCFs));
          ZKUtil.setData(this.zookeeper, peerNode, ReplicationSerDeHelper.toByteArray(rpc));
        }
      } else {
        LOG.info("No tableCFs in peerNode:" + peerId);
      }
    }
  } catch (KeeperException e) {
    LOG.warn("NOTICE!! Update peerId failed, peerId=" + peerId, e);
    return false;
  } catch (InterruptedException e) {
    LOG.warn("NOTICE!! Update peerId failed, peerId=" + peerId, e);
    return false;
  } catch (IOException e) {
    LOG.warn("NOTICE!! Update peerId failed, peerId=" + peerId, e);
    return false;
  }
  return true;
}
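As a rough illustration of how this per-peer migration step might be driven for a whole cluster, the hedged sketch below lists the peer ids under an assumed peers znode and calls update(peerId) for each. The updateAllPeers name, the peersZNode parameter, and the ZKUtil listing call are assumptions based on the snippet, not the exact TableCFsUpdater entry point.

// Hypothetical driver, assumed to live in the same class so it can reach
// the zookeeper watcher, LOG, and update(String).
public void updateAllPeers(String peersZNode) {
  List<String> peerIds;
  try {
    // List every peer id registered under the (assumed) peers znode.
    peerIds = ZKUtil.listChildrenNoWatch(this.zookeeper, peersZNode);
  } catch (KeeperException e) {
    LOG.warn("NOTICE!! Failed to list peers under " + peersZNode, e);
    return;
  }
  if (peerIds == null) {
    return;
  }
  for (String peerId : peerIds) {
    if (!update(peerId)) {
      LOG.warn("NOTICE!! Failed to copy tableCFs into the peer config, peerId=" + peerId);
    }
  }
}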