Example 46 with ZooKeeperWatcher

use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

the class TestReplicationBase method setUpBeforeClass.

/**
   * @throws java.lang.Exception
   */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // We don't want too many edits per batch sent to the ReplicationEndpoint to trigger
    // sufficient number of events. But we don't want to go too low because
    // HBaseInterClusterReplicationEndpoint partitions entries into batches and we want
    // more than one batch sent to the peer cluster for better testing.
    conf1.setInt("replication.source.size.capacity", 102400);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf1.setLong("replication.sleep.before.failover", 2000);
    conf1.setInt("replication.source.maxretriesmultiplier", 10);
    conf1.setFloat("replication.source.ratio", 1.0f);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to reget conf1 in case zk cluster location different
    // than default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");
    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
    LOG.info("Setup second Zk");
    CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
    utility1.startMiniCluster(2);
    // Have a bunch of slave servers, because inter-cluster shipping logic uses number of sinks
    // as a component in deciding maximum number of parallel batches to send to the peer cluster.
    utility2.startMiniCluster(4);
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey(utility2.getClusterKey());
    hbaseAdmin = ConnectionFactory.createConnection(conf1).getAdmin();
    hbaseAdmin.addReplicationPeer("2", rpc);
    HTableDescriptor table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setMaxVersions(100);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
    scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HColumnDescriptor f : table.getColumnFamilies()) {
        scopes.put(f.getName(), f.getScope());
    }
    Connection connection1 = ConnectionFactory.createConnection(conf1);
    Connection connection2 = ConnectionFactory.createConnection(conf2);
    try (Admin admin1 = connection1.getAdmin()) {
        admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    try (Admin admin2 = connection2.getAdmin()) {
        admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    utility1.waitUntilAllRegionsAssigned(tableName);
    utility2.waitUntilAllRegionsAssigned(tableName);
    htable1 = connection1.getTable(tableName);
    htable1.setWriteBufferSize(1024);
    htable2 = connection2.getTable(tableName);
}
Also used : HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) Connection(org.apache.hadoop.hbase.client.Connection) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) Admin(org.apache.hadoop.hbase.client.Admin) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BeforeClass(org.junit.BeforeClass)
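
Note on the pattern above: both watchers are built with the four-argument constructor (a Configuration, an identifier used in log messages, an Abortable which is null here, and a final boolean that lets the watcher create the base znode if it is missing). Below is a minimal standalone sketch of the same construction, assuming an hbase-site.xml on the classpath that points at a reachable ZooKeeper quorum; the class name, identifier and znode parent are illustrative only, and this is the pre-2.0 API (the class was later renamed ZKWatcher).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class ZooKeeperWatcherSketch {
    public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at a running ZooKeeper quorum.
        Configuration conf = HBaseConfiguration.create();
        // Each test cluster in the example lives under its own znode parent ("/1", "/2").
        conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
        // Same constructor as the test: identifier for logging, no Abortable,
        // and true so the base znode is created if it does not exist yet.
        ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "sketch", null, true);
        try {
            System.out.println("Connected under " + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
        } finally {
            // The watcher owns a live ZooKeeper connection; release it explicitly.
            zkw.close();
        }
    }
}

Example 48 below shows the matching zkw.close() once a watcher is no longer needed.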

Example 47 with ZooKeeperWatcher

use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

the class TestMultiSlaveReplication method setUpBeforeClass.

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf1 = HBaseConfiguration.create();
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // smaller block size and capacity to trigger more operations
    // and test them
    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    conf1.setInt("replication.source.size.capacity", 1024);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");
    conf1.setInt("hbase.master.cleaner.interval", 5 * 1000);
    conf1.setClass("hbase.region.replica.replication.replicationQueues.class", ReplicationQueuesZKImpl.class, ReplicationQueues.class);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    utility1.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf1, "cluster1", null, true);
    conf2 = new Configuration(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf3 = new Configuration(conf1);
    conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf2, "cluster2", null, true);
    utility3 = new HBaseTestingUtility(conf3);
    utility3.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf3, "cluster3", null, true);
    table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) BeforeClass(org.junit.BeforeClass)
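
The three ZooKeeperWatcher instances created here are never assigned to variables: with the final argument set to true, constructing them connects to ZooKeeper and creates each cluster's base znode (/1, /2, /3) if missing, which appears to be their only purpose since the references are discarded. The setup then ends with a table descriptor whose first family is replication-scoped. A short sketch of that descriptor pattern, with hypothetical table and family names:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class ReplicatedTableDescriptorSketch {
    public static void main(String[] args) {
        // Hypothetical table and family names; the point is the per-family scope.
        HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo"));

        HColumnDescriptor replicated = new HColumnDescriptor("repfam");
        // REPLICATION_SCOPE_GLOBAL marks this family's edits for cross-cluster replication.
        replicated.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
        table.addFamily(replicated);

        // A family left at the default local scope is skipped by replication,
        // as with noRepfamName in the test above.
        table.addFamily(new HColumnDescriptor("norepfam"));

        System.out.println(table);
    }
}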

Example 48 with ZooKeeperWatcher

use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

the class TestHBaseFsckOneRS method testCheckReplication.

@Test(timeout = 180000)
public void testCheckReplication() throws Exception {
    // check no errors
    HBaseFsck hbck = doFsck(conf, false);
    assertNoErrors(hbck);
    // create peer
    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
    Assert.assertEquals(0, replicationAdmin.getPeersCount());
    int zkPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey("127.0.0.1:" + zkPort + ":/hbase");
    replicationAdmin.addPeer("1", rpc, null);
    replicationAdmin.getPeersCount();
    Assert.assertEquals(1, replicationAdmin.getPeersCount());
    // create replicator
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection);
    ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, connection, zkw));
    repQueues.init("server1");
    // queues for current peer, no errors
    repQueues.addLog("1", "file1");
    repQueues.addLog("1-server2", "file1");
    Assert.assertEquals(2, repQueues.getAllQueues().size());
    hbck = doFsck(conf, false);
    assertNoErrors(hbck);
    // queues for removed peer
    repQueues.addLog("2", "file1");
    repQueues.addLog("2-server2", "file1");
    Assert.assertEquals(4, repQueues.getAllQueues().size());
    hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE });
    // fix the case
    hbck = doFsck(conf, true);
    hbck = doFsck(conf, false);
    assertNoErrors(hbck);
    // ensure only "2" is deleted
    Assert.assertEquals(2, repQueues.getAllQueues().size());
    Assert.assertNull(repQueues.getLogsInQueue("2"));
    Assert.assertNull(repQueues.getLogsInQueue("2-sever2"));
    replicationAdmin.removePeer("1");
    repQueues.removeAllQueues();
    zkw.close();
    replicationAdmin.close();
}
Also used : ReplicationQueues(org.apache.hadoop.hbase.replication.ReplicationQueues) ReplicationQueuesArguments(org.apache.hadoop.hbase.replication.ReplicationQueuesArguments) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) Test(org.junit.Test)
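
The cluster key assembled for the peer ("127.0.0.1:" + zkPort + ":/hbase") follows the quorum:clientPort:znodeParent format that the other examples obtain via HBaseTestingUtility.getClusterKey(). A small sketch that derives the same string from a Configuration; the fallback values are the stock defaults and are assumptions here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ClusterKeySketch {
    // A cluster key has the form "quorum:clientPort:znodeParent", matching the
    // "127.0.0.1:" + zkPort + ":/hbase" string assembled in the test above.
    static String clusterKey(Configuration conf) {
        // For a multi-node quorum this is a comma-separated host list.
        String quorum = conf.get(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
        int port = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT,
                HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
        String parent = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase");
        return quorum + ":" + port + ":" + parent;
    }

    public static void main(String[] args) {
        System.out.println(clusterKey(HBaseConfiguration.create()));
    }
}

ReplicationPeerConfig.setClusterKey takes exactly this string.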

Example 49 with ZooKeeperWatcher

use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project phoenix by apache.

the class MutableIndexReplicationIT method setupConfigsAndStartCluster.

private static void setupConfigsAndStartCluster() throws Exception {
    // cluster-1 lives at regular HBase home, so we don't need to change how phoenix handles
    // lookups
    //        conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // smaller log roll size to trigger more events
    setUpConfigForMiniCluster(conf1);
    conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to reset conf1 in case zk cluster location different
    // than default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");
    // Base conf2 on conf1 so it gets the right zk cluster, and general cluster configs
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
    //replicate from cluster 1 -> cluster 2, but not back again
    admin.addPeer("1", utility2.getClusterKey());
    LOG.info("Setup second Zk");
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);
}
Also used : HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster)
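
This Phoenix setup registers the peer through the older two-argument ReplicationAdmin.addPeer(id, clusterKey) overload, while Examples 46 and 48 go through a ReplicationPeerConfig. For comparison, a sketch of the config-based route with a hypothetical peer cluster key, including the close() call that ReplicationAdmin eventually needs (as in Example 48):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
    public static void main(String[] args) throws Exception {
        // Configuration of the source cluster (the one shipping edits).
        Configuration conf = HBaseConfiguration.create();
        ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
        try {
            ReplicationPeerConfig rpc = new ReplicationPeerConfig();
            // Hypothetical peer cluster key: quorum:clientPort:znodeParent.
            rpc.setClusterKey("peer-zk-host:2181:/2");
            // Same three-argument overload used in Example 48; the null map
            // places no per-table column-family restriction on the peer.
            replicationAdmin.addPeer("1", rpc, null);
        } finally {
            replicationAdmin.close();
        }
    }
}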

Example 50 with ZooKeeperWatcher

use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project phoenix by apache.

the class PhoenixMRJobSubmitter method scheduleIndexBuilds.

public int scheduleIndexBuilds() throws Exception {
    ZooKeeperWatcher zookeeperWatcher = new ZooKeeperWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
    if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT, AUTO_INDEX_BUILD_LOCK_NAME)) {
        LOG.info("Some other node is already running Automated Index Build. Skipping execution!");
        return -1;
    }
    // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built
    // (in state 'b')
    // 2) Get a list of all ACCEPTED, SUBMITTED AND RUNNING jobs from Yarn Resource Manager
    // 3) Get the jobs to submit (list from 1 - list from 2)
    // Get Candidate indexes to be built
    Map<String, PhoenixAsyncIndex> candidateJobs = getCandidateJobs();
    LOG.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
    // Get already scheduled Jobs list from Yarn Resource Manager
    Set<String> submittedJobs = getSubmittedYarnApps();
    LOG.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
    // Get final jobs to submit
    Set<PhoenixAsyncIndex> jobsToSchedule = getJobsToSubmit(candidateJobs, submittedJobs);
    LOG.info("Final indexes to be built - " + jobsToSchedule);
    List<Future<Boolean>> results = new ArrayList<Future<Boolean>>(jobsToSchedule.size());
    int failedJobSubmissionCount = 0;
    int timedoutJobSubmissionCount = 0;
    ExecutorService jobSubmitPool = Executors.newFixedThreadPool(10);
    LOG.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
    try {
        for (PhoenixAsyncIndex indexToBuild : jobsToSchedule) {
            PhoenixMRJobCallable task = new PhoenixMRJobCallable(HBaseConfiguration.create(conf), indexToBuild, "/");
            results.add(jobSubmitPool.submit(task));
        }
        for (Future<Boolean> result : results) {
            try {
                result.get(JOB_SUBMIT_POOL_TIMEOUT, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                failedJobSubmissionCount++;
            } catch (ExecutionException e) {
                failedJobSubmissionCount++;
            } catch (TimeoutException e) {
                timedoutJobSubmissionCount++;
            }
        }
    } finally {
        PhoenixMRJobUtil.shutdown(jobSubmitPool);
    }
    LOG.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = " + jobsToSchedule.size() + " ; Failed to Submit = " + failedJobSubmissionCount + " ; Timed out = " + timedoutJobSubmissionCount);
    return failedJobSubmissionCount;
}
Also used : ArrayList(java.util.ArrayList) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException)
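
Aside from the ZooKeeperWatcher-based lock that ensures only one node runs the automated index build, the core of this method is a bounded-wait submission loop: tasks are fanned out to a fixed thread pool and each Future is polled with a timeout so one stuck submission cannot block the rest, with failures and timeouts counted separately. A generic sketch of that loop with stand-in tasks; PhoenixMRJobCallable and JOB_SUBMIT_POOL_TIMEOUT are replaced by placeholders here:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedSubmitSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(10);
        List<Future<Boolean>> results = new ArrayList<>();
        int failed = 0, timedOut = 0;
        try {
            for (int i = 0; i < 5; i++) {
                final int jobId = i;
                // Stand-in for PhoenixMRJobCallable: pretend each submission takes some time.
                Callable<Boolean> task = () -> {
                    Thread.sleep(100L * jobId);
                    return true;
                };
                results.add(pool.submit(task));
            }
            for (Future<Boolean> result : results) {
                try {
                    // Bounded wait per job, mirroring result.get(JOB_SUBMIT_POOL_TIMEOUT, TimeUnit.MINUTES).
                    // The original also counts InterruptedException and ExecutionException as failures.
                    result.get(5, TimeUnit.SECONDS);
                } catch (ExecutionException e) {
                    failed++;
                } catch (TimeoutException e) {
                    timedOut++;
                }
            }
        } finally {
            pool.shutdown();
            pool.awaitTermination(1, TimeUnit.MINUTES);
        }
        System.out.println("failed=" + failed + " timedOut=" + timedOut);
    }
}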

Aggregations

Classes most commonly used alongside ZooKeeperWatcher across the indexed examples (usage counts):

ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 105
Test (org.junit.Test): 46
Configuration (org.apache.hadoop.conf.Configuration): 33
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 21
Table (org.apache.hadoop.hbase.client.Table): 20
IOException (java.io.IOException): 19
ServerName (org.apache.hadoop.hbase.ServerName): 16
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 15
Ignore (org.junit.Ignore): 15
ArrayList (java.util.ArrayList): 14
RegionServerThread (org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread): 13
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 12
BeforeClass (org.junit.BeforeClass): 12
HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 11
List (java.util.List): 10
KeeperException (org.apache.zookeeper.KeeperException): 10
TimeoutException (java.util.concurrent.TimeoutException): 9
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 9
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 9
Waiter (org.apache.hadoop.hbase.Waiter): 9