Use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.
The class TestPerTableCFReplication, method testPerTableCFReplication.
@Test(timeout = 300000)
public void testPerTableCFReplication() throws Exception {
LOG.info("testPerTableCFReplication");
ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf1);
Connection connection1 = ConnectionFactory.createConnection(conf1);
Connection connection2 = ConnectionFactory.createConnection(conf2);
Connection connection3 = ConnectionFactory.createConnection(conf3);
try {
Admin admin1 = connection1.getAdmin();
Admin admin2 = connection2.getAdmin();
Admin admin3 = connection3.getAdmin();
admin1.createTable(tabA);
admin1.createTable(tabB);
admin1.createTable(tabC);
admin2.createTable(tabA);
admin2.createTable(tabB);
admin2.createTable(tabC);
admin3.createTable(tabA);
admin3.createTable(tabB);
admin3.createTable(tabC);
Table htab1A = connection1.getTable(tabAName);
Table htab2A = connection2.getTable(tabAName);
Table htab3A = connection3.getTable(tabAName);
Table htab1B = connection1.getTable(tabBName);
Table htab2B = connection2.getTable(tabBName);
Table htab3B = connection3.getTable(tabBName);
Table htab1C = connection1.getTable(tabCName);
Table htab2C = connection2.getTable(tabCName);
Table htab3C = connection3.getTable(tabCName);
// A. add cluster2/cluster3 as peers to cluster1
ReplicationPeerConfig rpc2 = new ReplicationPeerConfig();
rpc2.setClusterKey(utility2.getClusterKey());
Map<TableName, List<String>> tableCFs = new HashMap<>();
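// a null CF list marks every column family of the table as replicable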
tableCFs.put(tabCName, null);
tableCFs.put(tabBName, new ArrayList<>());
tableCFs.get(tabBName).add("f1");
tableCFs.get(tabBName).add("f3");
replicationAdmin.addPeer("2", rpc2, tableCFs);
ReplicationPeerConfig rpc3 = new ReplicationPeerConfig();
rpc3.setClusterKey(utility3.getClusterKey());
tableCFs.clear();
tableCFs.put(tabAName, null);
tableCFs.put(tabBName, new ArrayList<>());
tableCFs.get(tabBName).add("f1");
tableCFs.get(tabBName).add("f2");
replicationAdmin.addPeer("3", rpc3, tableCFs);
// A1. tableA can only be replicated to cluster3
putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
ensureRowNotReplicated(row1, f1Name, htab2A);
deleteAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
putAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
ensureRowNotReplicated(row1, f2Name, htab2A);
deleteAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
putAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
ensureRowNotReplicated(row1, f3Name, htab2A);
deleteAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
// A2. cf 'f1' of tableB can be replicated to both cluster2 and cluster3
putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
// cf 'f2' of tableB can only be replicated to cluster3
putAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
ensureRowNotReplicated(row1, f2Name, htab2B);
deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
// cf 'f3' of tableB can only be replicated to cluster2
putAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
ensureRowNotReplicated(row1, f3Name, htab3B);
deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
// A3. tableC can only be replicated to cluster2
putAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
ensureRowNotReplicated(row1, f1Name, htab3C);
deleteAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
putAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
ensureRowNotReplicated(row1, f2Name, htab3C);
deleteAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
putAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
ensureRowNotReplicated(row1, f3Name, htab3C);
deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
// B. change peers' replicable table-cf config
tableCFs.clear();
tableCFs.put(tabAName, new ArrayList<>());
tableCFs.get(tabAName).add("f1");
tableCFs.get(tabAName).add("f2");
tableCFs.put(tabCName, new ArrayList<>());
tableCFs.get(tabCName).add("f2");
tableCFs.get(tabCName).add("f3");
replicationAdmin.setPeerTableCFs("2", tableCFs);
tableCFs.clear();
tableCFs.put(tabBName, null);
tableCFs.put(tabCName, new ArrayList<>());
tableCFs.get(tabCName).add("f3");
replicationAdmin.setPeerTableCFs("3", tableCFs);
// B1. cf 'f1' of tableA can only be replicated to cluster2
putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
ensureRowNotReplicated(row2, f1Name, htab3A);
deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
// cf 'f2' of tableA can only be replicated to cluster2
putAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
ensureRowNotReplicated(row2, f2Name, htab3A);
deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
// cf 'f3' of tableA isn't replicable to either cluster2 or cluster3
putAndWaitWithFamily(row2, f3Name, htab1A);
ensureRowNotReplicated(row2, f3Name, htab2A, htab3A);
deleteAndWaitWithFamily(row2, f3Name, htab1A);
// B2. tableB can only be replicated to cluster3
putAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
ensureRowNotReplicated(row2, f1Name, htab2B);
deleteAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
putAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
ensureRowNotReplicated(row2, f2Name, htab2B);
deleteAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
putAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
ensureRowNotReplicated(row2, f3Name, htab2B);
deleteAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
// B3. cf 'f1' of tableC isn't replicable to either cluster
putAndWaitWithFamily(row2, f1Name, htab1C);
ensureRowNotReplicated(row2, f1Name, htab2C, htab3C);
deleteAndWaitWithFamily(row2, f1Name, htab1C);
// cf 'f2' of tableC can only be replicated to cluster2
putAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
ensureRowNotReplicated(row2, f2Name, htab3C);
deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
// cf 'f3' of tableC can be replicated to both cluster2 and cluster3
putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
} finally {
connection1.close();
connection2.close();
connection3.close();
}
}
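The helpers used above (putAndWaitWithFamily, ensureRowNotReplicated, deleteAndWaitWithFamily) are defined elsewhere in TestPerTableCFReplication and not shown on this page. A minimal sketch of what the put-and-wait helper might look like; the qualifier/value convention, retry count, and sleep interval are illustrative assumptions, not the test's actual values:
private void putAndWaitWithFamily(byte[] row, byte[] fam, Table source, Table... targets) throws Exception {
    // write the cell on the source cluster, reusing the row bytes as qualifier and value
    Put put = new Put(row);
    put.addColumn(fam, row, row);
    source.put(put);
    Get get = new Get(row);
    get.addFamily(fam);
    // poll each target cluster until the edit shows up or we give up
    for (Table target : targets) {
        for (int i = 0; ; i++) {
            if (i == 30) {
                fail("Waited too long for replication of row " + Bytes.toString(row));
            }
            Result res = target.get(get);
            if (!res.isEmpty() && Bytes.equals(res.getValue(fam, row), row)) {
                break;
            }
            Thread.sleep(500);
        }
    }
}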
Use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.
The class TestReplicationBase, method setUpBeforeClass.
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
// We don't want too many edits per batch sent to the ReplicationEndpoint, so that a
// sufficient number of events is triggered. But we don't want to go too low because
// HBaseInterClusterReplicationEndpoint partitions entries into batches and we want
// more than one batch sent to the peer cluster for better testing.
conf1.setInt("replication.source.size.capacity", 102400);
conf1.setLong("replication.source.sleepforretries", 100);
conf1.setInt("hbase.regionserver.maxlogs", 10);
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setInt("zookeeper.recovery.retry", 1);
conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setInt("replication.stats.thread.period.seconds", 5);
conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
conf1.setLong("replication.sleep.before.failover", 2000);
conf1.setInt("replication.source.maxretriesmultiplier", 10);
conf1.setFloat("replication.source.ratio", 1.0f);
utility1 = new HBaseTestingUtility(conf1);
utility1.startMiniZKCluster();
MiniZooKeeperCluster miniZK = utility1.getZkCluster();
// Have to re-get conf1 in case the zk cluster location is different
// from the default
conf1 = utility1.getConfiguration();
zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
admin = new ReplicationAdmin(conf1);
LOG.info("Setup first Zk");
// Base conf2 on conf1 so it gets the right zk cluster.
conf2 = HBaseConfiguration.create(conf1);
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
LOG.info("Setup second Zk");
CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
utility1.startMiniCluster(2);
// Have a bunch of slave servers, because inter-cluster shipping logic uses number of sinks
// as a component in deciding maximum number of parallel batches to send to the peer cluster.
utility2.startMiniCluster(4);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
hbaseAdmin = ConnectionFactory.createConnection(conf1).getAdmin();
hbaseAdmin.addReplicationPeer("2", rpc);
HTableDescriptor table = new HTableDescriptor(tableName);
HColumnDescriptor fam = new HColumnDescriptor(famName);
fam.setMaxVersions(100);
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
table.addFamily(fam);
fam = new HColumnDescriptor(noRepfamName);
table.addFamily(fam);
scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (HColumnDescriptor f : table.getColumnFamilies()) {
scopes.put(f.getName(), f.getScope());
}
Connection connection1 = ConnectionFactory.createConnection(conf1);
Connection connection2 = ConnectionFactory.createConnection(conf2);
try (Admin admin1 = connection1.getAdmin()) {
admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
}
try (Admin admin2 = connection2.getAdmin()) {
admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
}
utility1.waitUntilAllRegionsAssigned(tableName);
utility2.waitUntilAllRegionsAssigned(tableName);
htable1 = connection1.getTable(tableName);
htable1.setWriteBufferSize(1024);
htable2 = connection2.getTable(tableName);
}
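A fixture like this is normally paired with an @AfterClass teardown. A minimal sketch, assuming only the resources opened above need closing (the actual TestReplicationBase teardown may close more):
@AfterClass
public static void tearDownAfterClass() throws Exception {
    // close tables and the replication admin before tearing down the mini clusters
    htable2.close();
    htable1.close();
    admin.close();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
}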
Use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.
The class TestMultiSlaveReplication, method testMultiSlaveReplication.
@Test(timeout = 300000)
public void testMultiSlaveReplication() throws Exception {
LOG.info("testCyclicReplication");
MiniHBaseCluster master = utility1.startMiniCluster();
utility2.startMiniCluster();
utility3.startMiniCluster();
ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
utility1.getAdmin().createTable(table);
utility2.getAdmin().createTable(table);
utility3.getAdmin().createTable(table);
Table htable1 = utility1.getConnection().getTable(tableName);
htable1.setWriteBufferSize(1024);
Table htable2 = utility2.getConnection().getTable(tableName);
htable2.setWriteBufferSize(1024);
Table htable3 = utility3.getConnection().getTable(tableName);
htable3.setWriteBufferSize(1024);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
admin1.addPeer("1", rpc, null);
// put "row" and wait 'til it got around, then delete
putAndWait(row, famName, htable1, htable2);
deleteAndWait(row, htable1, htable2);
// check it wasn't replicated to cluster 3
checkRow(row, 0, htable3);
putAndWait(row2, famName, htable1, htable2);
// now roll the region server's logs
rollWALAndWait(utility1, htable1.getName(), row2);
// after the log was rolled put a new row
putAndWait(row3, famName, htable1, htable2);
rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility3.getClusterKey());
admin1.addPeer("2", rpc, null);
// put a row, check it was replicated to all clusters
putAndWait(row1, famName, htable1, htable2, htable3);
// delete and verify
deleteAndWait(row1, htable1, htable2, htable3);
// make sure row2 did not get replicated after
// cluster 3 was added
checkRow(row2, 0, htable3);
// row3 will get replicated, because it was in the
// latest log
checkRow(row3, 1, htable3);
Put p = new Put(row);
p.addColumn(famName, row, row);
htable1.put(p);
// now roll the logs again
rollWALAndWait(utility1, htable1.getName(), row);
// cleanup "row2", also conveniently use this to wait replication
// to finish
deleteAndWait(row2, htable1, htable2, htable3);
// Even if the log was rolled in the middle of the replication,
// "row" is still replicated.
checkRow(row, 1, htable2);
// The replication thread of cluster 2 may be sleeping, and since row2 is not there,
// we should wait before checking.
checkWithWait(row, 1, htable3);
// cleanup the rest
deleteAndWait(row, htable1, htable2, htable3);
deleteAndWait(row3, htable1, htable2, htable3);
utility3.shutdownMiniCluster();
utility2.shutdownMiniCluster();
utility1.shutdownMiniCluster();
}
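putAndWait, deleteAndWait, checkRow, checkWithWait, and rollWALAndWait are private helpers of TestMultiSlaveReplication and are not shown on this page. A plausible sketch of checkRow, assuming its int argument is the number of cells the row is expected to hold in each table:
private void checkRow(byte[] row, int count, Table... tables) throws IOException {
    Get get = new Get(row);
    for (Table table : tables) {
        Result res = table.get(get);
        // count == 0 asserts the row never reached this cluster
        assertEquals("Unexpected cell count for row " + Bytes.toString(row), count, res.size());
    }
}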
Use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.
The class TestHBaseFsckOneRS, method testCheckReplication.
@Test(timeout = 180000)
public void testCheckReplication() throws Exception {
// check no errors
HBaseFsck hbck = doFsck(conf, false);
assertNoErrors(hbck);
// create peer
ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
Assert.assertEquals(0, replicationAdmin.getPeersCount());
int zkPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey("127.0.0.1:" + zkPort + ":/hbase");
replicationAdmin.addPeer("1", rpc, null);
Assert.assertEquals(1, replicationAdmin.getPeersCount());
// create replicator
ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection);
ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, connection, zkw));
repQueues.init("server1");
// queues for current peer, no errors
repQueues.addLog("1", "file1");
repQueues.addLog("1-server2", "file1");
Assert.assertEquals(2, repQueues.getAllQueues().size());
hbck = doFsck(conf, false);
assertNoErrors(hbck);
// queues for removed peer
repQueues.addLog("2", "file1");
repQueues.addLog("2-server2", "file1");
Assert.assertEquals(4, repQueues.getAllQueues().size());
hbck = doFsck(conf, false);
assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE });
// fix the case
hbck = doFsck(conf, true);
hbck = doFsck(conf, false);
assertNoErrors(hbck);
// ensure only "2" is deleted
Assert.assertEquals(2, repQueues.getAllQueues().size());
Assert.assertNull(repQueues.getLogsInQueue("2"));
Assert.assertNull(repQueues.getLogsInQueue("2-sever2"));
replicationAdmin.removePeer("1");
repQueues.removeAllQueues();
zkw.close();
replicationAdmin.close();
}
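The consistency rule hbck enforces here is that every replication queue must belong to a registered peer; queue ids look like "2" for a normal queue or "2-server2" for a queue recovered from a dead server. A minimal sketch of that rule, with the id parsing an assumption based on the ids used in this test rather than the full ReplicationQueueInfo logic:
// flags queues such as "2" and "2-server2" once peer "2" is gone,
// while "1" and "1-server2" pass because peer "1" is still registered
static boolean isOrphanQueue(String queueId, java.util.Set<String> livePeerIds) {
    String peerId = queueId.split("-", 2)[0]; // peer id is the prefix before the first '-'
    return !livePeerIds.contains(peerId);
}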
Use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.
The class TestRegionReplicaReplicationEndpoint, method testRegionReplicaReplicationPeerIsCreated.
@Test
public void testRegionReplicaReplicationPeerIsCreated() throws IOException, ReplicationException {
// create a table with region replicas. Check whether the replication peer is created
// and replication started.
ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
String peerId = "region_replica_replication";
ReplicationPeerConfig peerConfig = null;
try {
peerConfig = admin.getPeerConfig(peerId);
} catch (ReplicationPeerNotFoundException e) {
LOG.warn("Region replica replication peer id=" + peerId + " not exist", e);
}
if (peerConfig != null) {
admin.removePeer(peerId);
peerConfig = null;
}
HTableDescriptor htd = HTU.createTableDescriptor("testReplicationPeerIsCreated_no_region_replicas");
HTU.getAdmin().createTable(htd);
try {
peerConfig = admin.getPeerConfig(peerId);
fail("Should throw ReplicationException, because replication peer id=" + peerId + " not exist");
} catch (ReplicationPeerNotFoundException e) {
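// expected: no replication peer is created for a table without region replicas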
}
assertNull(peerConfig);
htd = HTU.createTableDescriptor("testReplicationPeerIsCreated");
htd.setRegionReplication(2);
HTU.getAdmin().createTable(htd);
// assert peer configuration is correct
peerConfig = admin.getPeerConfig(peerId);
assertNotNull(peerConfig);
assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey(HTU.getConfiguration()));
assertEquals(peerConfig.getReplicationEndpointImpl(), RegionReplicaReplicationEndpoint.class.getName());
admin.close();
}
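ReplicationAdmin is the older client API; the replication methods on Admin (already used via addReplicationPeer in setUpBeforeClass above) supersede it. A sketch of the equivalent peer lookup through that interface, assuming an HBase 2.x-era Admin:
try (Connection conn = ConnectionFactory.createConnection(HTU.getConfiguration());
     Admin hbaseAdmin = conn.getAdmin()) {
    ReplicationPeerConfig cfg = hbaseAdmin.getReplicationPeerConfig("region_replica_replication");
    // cfg.getReplicationEndpointImpl() names RegionReplicaReplicationEndpoint
}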