
Example 6 with ReplicationAdmin

use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.

the class TestReplicationSyncUpTool method setupReplication.

protected void setupReplication() throws Exception {
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);
    Admin ha = utility1.getAdmin();
    ha.createTable(t1_syncupSource);
    ha.createTable(t2_syncupSource);
    ha.close();
    ha = utility2.getAdmin();
    ha.createTable(t1_syncupTarget);
    ha.createTable(t2_syncupTarget);
    ha.close();
    Connection connection1 = ConnectionFactory.createConnection(utility1.getConfiguration());
    Connection connection2 = ConnectionFactory.createConnection(utility2.getConfiguration());
    // Get HTable from Master
    ht1Source = connection1.getTable(t1_su);
    ht1Source.setWriteBufferSize(1024);
    ht2Source = connection1.getTable(t2_su);
    ht2Source.setWriteBufferSize(1024);
    // Get HTable from Peer1
    ht1TargetAtPeer1 = connection2.getTable(t1_su);
    ht1TargetAtPeer1.setWriteBufferSize(1024);
    ht2TargetAtPeer1 = connection2.getTable(t2_su);
    ht2TargetAtPeer1.setWriteBufferSize(1024);
    /**
     * Set up master-slave replication: master = utility1, slave1 = utility2.
     */
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey(utility2.getClusterKey());
    admin1.addPeer("1", rpc, null);
    admin1.close();
    admin2.close();
}
Also used : ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin)
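
The setup above registers peer "1" and then only closes the admins. As a hedged sketch (not code from TestReplicationSyncUpTool), a matching teardown could remove the peer and release the Table handles; it assumes the same conf1 and ht* fields used in the method above.

protected void tearDownReplication() throws Exception {
    // Sketch: undo the addPeer("1", ...) call from setupReplication().
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    try {
        admin1.removePeer("1");
    } finally {
        admin1.close();
    }
    // Release the Table handles opened during setup.
    ht1Source.close();
    ht2Source.close();
    ht1TargetAtPeer1.close();
    ht2TargetAtPeer1.close();
}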

Example 7 with ReplicationAdmin

use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.

the class TestReplicationWithTags method setUpBeforeClass.

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf1.setInt("hfile.format.version", 3);
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, TestCoprocessorForTagsAtSource.class.getName());
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location is different
    // from the default
    conf1 = utility1.getConfiguration();
    replicationAdmin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");
    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.setInt("hfile.format.version", 3);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, TestCoprocessorForTagsAtSink.class.getName());
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    LOG.info("Setup second Zk");
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey(utility2.getClusterKey());
    replicationAdmin.addPeer("2", rpc, null);
    HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
    HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
    fam.setMaxVersions(3);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    try (Connection conn = ConnectionFactory.createConnection(conf1);
        Admin admin = conn.getAdmin()) {
        admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    try (Connection conn = ConnectionFactory.createConnection(conf2);
        Admin admin = conn.getAdmin()) {
        admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    htable1 = utility1.getConnection().getTable(TABLE_NAME);
    htable2 = utility2.getConnection().getTable(TABLE_NAME);
}
Also used : KeyValueCodecWithTags(org.apache.hadoop.hbase.codec.KeyValueCodecWithTags) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) Connection(org.apache.hadoop.hbase.client.Connection) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BeforeClass(org.junit.BeforeClass)
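
Replication between the two mini clusters set up above is asynchronous. As an illustrative, hedged sketch (not the original test body of TestReplicationWithTags), a test method could write to htable1 and poll htable2 until the row shows up. ROW, QUALIFIER, and the value are made-up constants, the usual HBase client imports (Put, Get, Result, Bytes) are assumed, and JUnit's assertArrayEquals/fail are assumed to be statically imported.

private static final byte[] ROW = Bytes.toBytes("row1");
private static final byte[] QUALIFIER = Bytes.toBytes("q");

@Test
public void testRowIsReplicatedToPeer() throws Exception {
    // Write to the source cluster's table.
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("value"));
    htable1.put(put);
    // Replication is asynchronous, so poll the sink cluster's table with a bounded retry loop.
    Get get = new Get(ROW);
    for (int i = 0; i < 30; i++) {
        Result result = htable2.get(get);
        if (!result.isEmpty()) {
            assertArrayEquals(Bytes.toBytes("value"), result.getValue(FAMILY, QUALIFIER));
            return;
        }
        Thread.sleep(1000);
    }
    fail("Row was not replicated to the peer cluster in time");
}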

Example 8 with ReplicationAdmin

use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.

the class TestGlobalThrottler method setUpBeforeClass.

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf1 = HBaseConfiguration.create();
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    conf1.setLong("replication.source.sleepforretries", 100);
    // Each WAL is about 120 bytes
    conf1.setInt(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, 200);
    conf1.setLong("replication.source.per.peer.node.bandwidth", 100L);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    new ZooKeeperWatcher(conf1, "cluster1", null, true);
    conf2 = new Configuration(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf2, "cluster2", null, true);
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey(utility2.getClusterKey());
    utility1.startMiniCluster(1, 1);
    utility2.startMiniCluster(1, 1);
    admin1.addPeer("peer1", rpc, null);
    admin1.addPeer("peer2", rpc, null);
    admin1.addPeer("peer3", rpc, null);
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) BeforeClass(org.junit.BeforeClass)
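
Note that setUpBeforeClass registers three peers against the same target cluster. A possible @AfterClass counterpart, shown here only as a hedged sketch (TestGlobalThrottler's real teardown is not part of this example, and assertEquals is assumed to be statically imported), could verify the peer count and clean up:

@AfterClass
public static void tearDownAfterClass() throws Exception {
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    try {
        // All three peers added above point at utility2's cluster key.
        assertEquals(3, admin1.getPeersCount());
        admin1.removePeer("peer1");
        admin1.removePeer("peer2");
        admin1.removePeer("peer3");
    } finally {
        admin1.close();
    }
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
}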

Example 9 with ReplicationAdmin

use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project phoenix by apache.

the class MutableIndexReplicationIT method setupConfigsAndStartCluster.

private static void setupConfigsAndStartCluster() throws Exception {
    // cluster-1 lives at regular HBase home, so we don't need to change how phoenix handles
    // lookups
    //        conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // smaller log roll size to trigger more events
    setUpConfigForMiniCluster(conf1);
    conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location is different
    // from the default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");
    // Base conf2 on conf1 so it gets the right zk cluster, and general cluster configs
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
    // replicate from cluster 1 -> cluster 2, but not back again
    admin.addPeer("1", utility2.getClusterKey());
    LOG.info("Setup second Zk");
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);
}
Also used : HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster)
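
The addPeer(String, String clusterKey) overload used above is deprecated in later HBase releases. A hedged sketch of the equivalent call with ReplicationPeerConfig (the form used in the hbase examples above) would be:

ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
// replicate from cluster 1 -> cluster 2, but not back again
admin.addPeer("1", rpc, null);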

Example 10 with ReplicationAdmin

use of org.apache.hadoop.hbase.client.replication.ReplicationAdmin in project hbase by apache.

the class TestRegionReplicaReplicationEndpoint method testRegionReplicaReplicationIgnoresDisabledTables.

public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable) throws Exception {
    // Tests that edits from a disabled or dropped table are handled correctly: those
    // entries are skipped, and edits that arrive after the dropped/disabled table's edits
    // can still be replicated without problems.
    final TableName tableName = TableName.valueOf(name.getMethodName() + dropTable);
    HTableDescriptor htd = HTU.createTableDescriptor(tableName);
    int regionReplication = 3;
    htd.setRegionReplication(regionReplication);
    HTU.deleteTableIfAny(tableName);
    HTU.getAdmin().createTable(htd);
    TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
    HTU.deleteTableIfAny(toBeDisabledTable);
    htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
    htd.setRegionReplication(regionReplication);
    HTU.getAdmin().createTable(htd);
    // both tables are created, now pause replication
    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
    admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
    // now that the replication is disabled, write to the table to be dropped, then drop the table.
    Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
    Table table = connection.getTable(tableName);
    Table tableToBeDisabled = connection.getTable(toBeDisabledTable);
    HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);
    AtomicLong skippedEdits = new AtomicLong();
    RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink = mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
    when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
    RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter = new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink, (ClusterConnection) connection, Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
    RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
    HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
    byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();
    Entry entry = new Entry(new WALKey(encodedRegionName, toBeDisabledTable, 1), new WALEdit());
    // disable the table
    HTU.getAdmin().disableTable(toBeDisabledTable);
    if (dropTable) {
        HTU.getAdmin().deleteTable(toBeDisabledTable);
    }
    sinkWriter.append(toBeDisabledTable, encodedRegionName, HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));
    assertEquals(2, skippedEdits.get());
    try {
        // load data to the main table (the to-be-dropped table was already written above)
        HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
        // now enable the replication
        admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
        verifyReplication(tableName, regionReplication, 0, 1000);
    } finally {
        admin.close();
        table.close();
        rl.close();
        tableToBeDisabled.close();
        HTU.deleteTableIfAny(toBeDisabledTable);
        connection.close();
    }
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) AtomicLong(java.util.concurrent.atomic.AtomicLong) Entry(org.apache.hadoop.hbase.wal.WAL.Entry) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit)
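
The test disables the peer before the table changes and only re-enables it inside the try block. As a general, hedged pattern (not code from the test), pausing replication around maintenance work can be wrapped in try/finally so the peer is re-enabled even if an intermediate step throws:

ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
try {
    admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
    // ... perform table changes while replication for this peer is paused ...
} finally {
    admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
    admin.close();
}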

Aggregations

ReplicationAdmin (org.apache.hadoop.hbase.client.replication.ReplicationAdmin): 13
HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 6
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 6
Test (org.junit.Test): 6
Connection (org.apache.hadoop.hbase.client.Connection): 5
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 5
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 5
Admin (org.apache.hadoop.hbase.client.Admin): 4
BeforeClass (org.junit.BeforeClass): 4
Configuration (org.apache.hadoop.conf.Configuration): 3
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 3
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3
Table (org.apache.hadoop.hbase.client.Table): 3
ReplicationPeerNotFoundException (org.apache.hadoop.hbase.ReplicationPeerNotFoundException): 2
TableName (org.apache.hadoop.hbase.TableName): 2
IOException (java.io.IOException): 1
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
List (java.util.List): 1