Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
The class TestReplicationBase, method setUpBeforeClass.
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  // Keep edits-per-batch to the ReplicationEndpoint small enough to trigger a
  // sufficient number of events, but not too small: HBaseInterClusterReplicationEndpoint
  // partitions entries into batches, and we want more than one batch sent to the peer
  // cluster for better testing.
  conf1.setInt("replication.source.size.capacity", 102400);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setLong("replication.sleep.before.failover", 2000);
  conf1.setInt("replication.source.maxretriesmultiplier", 10);
  conf1.setFloat("replication.source.ratio", 1.0f);
  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to re-fetch conf1 in case the zk cluster location is different than the default
  conf1 = utility1.getConfiguration();
  zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
  admin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");
  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);
  zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
  LOG.info("Setup second Zk");
  CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
  utility1.startMiniCluster(2);
  // Have a bunch of slave servers, because the inter-cluster shipping logic uses the
  // number of sinks as a component in deciding the maximum number of parallel batches
  // to send to the peer cluster.
  utility2.startMiniCluster(4);
  ReplicationPeerConfig rpc = new ReplicationPeerConfig();
  rpc.setClusterKey(utility2.getClusterKey());
  hbaseAdmin = ConnectionFactory.createConnection(conf1).getAdmin();
  hbaseAdmin.addReplicationPeer("2", rpc);
  HTableDescriptor table = new HTableDescriptor(tableName);
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setMaxVersions(100);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  fam = new HColumnDescriptor(noRepfamName);
  table.addFamily(fam);
  scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (HColumnDescriptor f : table.getColumnFamilies()) {
    scopes.put(f.getName(), f.getScope());
  }
  Connection connection1 = ConnectionFactory.createConnection(conf1);
  Connection connection2 = ConnectionFactory.createConnection(conf2);
  try (Admin admin1 = connection1.getAdmin()) {
    admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  try (Admin admin2 = connection2.getAdmin()) {
    admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  utility1.waitUntilAllRegionsAssigned(tableName);
  utility2.waitUntilAllRegionsAssigned(tableName);
  htable1 = connection1.getTable(tableName);
  htable1.setWriteBufferSize(1024);
  htable2 = connection2.getTable(tableName);
}
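This snippet omits the matching teardown. Below is a minimal sketch of the cleanup such a setup typically needs, assuming the fields above and JUnit 4; the ordering is an assumption, not the test's actual tearDown:

@AfterClass
public static void tearDownAfterClass() throws Exception {
  // Assumption: close client-side resources before stopping the mini clusters.
  htable1.close();
  htable2.close();
  hbaseAdmin.close();
  admin.close();
  zkw1.close();
  zkw2.close();
  utility2.shutdownMiniCluster();
  utility1.shutdownMiniCluster();
}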
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
The class TestHBaseFsckOneRS, method testCheckReplication.
@Test(timeout = 180000)
public void testCheckReplication() throws Exception {
  // check no errors
  HBaseFsck hbck = doFsck(conf, false);
  assertNoErrors(hbck);
  // create peer
  ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
  Assert.assertEquals(0, replicationAdmin.getPeersCount());
  int zkPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT,
      HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
  ReplicationPeerConfig rpc = new ReplicationPeerConfig();
  rpc.setClusterKey("127.0.0.1:" + zkPort + ":/hbase");
  replicationAdmin.addPeer("1", rpc, null);
  Assert.assertEquals(1, replicationAdmin.getPeersCount());
  // create replicator
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection);
  ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(
      new ReplicationQueuesArguments(conf, connection, zkw));
  repQueues.init("server1");
  // queues for current peer, no errors
  repQueues.addLog("1", "file1");
  repQueues.addLog("1-server2", "file1");
  Assert.assertEquals(2, repQueues.getAllQueues().size());
  hbck = doFsck(conf, false);
  assertNoErrors(hbck);
  // queues for removed peer
  repQueues.addLog("2", "file1");
  repQueues.addLog("2-server2", "file1");
  Assert.assertEquals(4, repQueues.getAllQueues().size());
  hbck = doFsck(conf, false);
  assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
      HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE,
      HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE });
  // fix the case
  hbck = doFsck(conf, true);
  hbck = doFsck(conf, false);
  assertNoErrors(hbck);
  // ensure only the queues for the removed peer "2" are deleted
  Assert.assertEquals(2, repQueues.getAllQueues().size());
  Assert.assertNull(repQueues.getLogsInQueue("2"));
  Assert.assertNull(repQueues.getLogsInQueue("2-server2"));
  replicationAdmin.removePeer("1");
  repQueues.removeAllQueues();
  zkw.close();
  replicationAdmin.close();
}
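By convention, a bare peer id such as "1" names a peer's own replication queue, while "1-server2" marks a queue claimed from a dead region server; that is why fixing the removed peer "2" deletes both "2" and "2-server2". A minimal inspection sketch using only the calls exercised above (passing null for the Abortable argument is an assumption made for brevity):

ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "queue-inspector", null);
ReplicationQueues queues = ReplicationFactory.getReplicationQueues(
    new ReplicationQueuesArguments(conf, null, zkw));
queues.init("server1"); // register under this server's replication znode
List<String> allQueues = queues.getAllQueues(); // queue ids owned by "server1"
List<String> walsForPeer1 = queues.getLogsInQueue("1"); // WAL files queued for peer "1"
zkw.close();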
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project phoenix by apache.
The class PhoenixMRJobSubmitter, method scheduleIndexBuilds.
public int scheduleIndexBuilds() throws Exception {
  ZooKeeperWatcher zookeeperWatcher =
      new ZooKeeperWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
  if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
      AUTO_INDEX_BUILD_LOCK_NAME)) {
    LOG.info("Some other node is already running Automated Index Build. Skipping execution!");
    return -1;
  }
  // 1) Query the Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be
  //    built (in state 'b')
  // 2) Get a list of all ACCEPTED, SUBMITTED and RUNNING jobs from the Yarn Resource Manager
  // 3) Get the jobs to submit (list from 1 minus list from 2)
  // Get candidate indexes to be built
  Map<String, PhoenixAsyncIndex> candidateJobs = getCandidateJobs();
  LOG.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
  // Get the already scheduled jobs list from the Yarn Resource Manager
  Set<String> submittedJobs = getSubmittedYarnApps();
  LOG.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
  // Get the final jobs to submit
  Set<PhoenixAsyncIndex> jobsToSchedule = getJobsToSubmit(candidateJobs, submittedJobs);
  LOG.info("Final indexes to be built - " + jobsToSchedule);
  List<Future<Boolean>> results = new ArrayList<Future<Boolean>>(jobsToSchedule.size());
  int failedJobSubmissionCount = 0;
  int timedoutJobSubmissionCount = 0;
  ExecutorService jobSubmitPool = Executors.newFixedThreadPool(10);
  LOG.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
  try {
    for (PhoenixAsyncIndex indexToBuild : jobsToSchedule) {
      PhoenixMRJobCallable task =
          new PhoenixMRJobCallable(HBaseConfiguration.create(conf), indexToBuild, "/");
      results.add(jobSubmitPool.submit(task));
    }
    for (Future<Boolean> result : results) {
      try {
        result.get(JOB_SUBMIT_POOL_TIMEOUT, TimeUnit.MINUTES);
      } catch (InterruptedException | ExecutionException e) {
        failedJobSubmissionCount++;
      } catch (TimeoutException e) {
        timedoutJobSubmissionCount++;
      }
    }
  } finally {
    PhoenixMRJobUtil.shutdown(jobSubmitPool);
  }
  LOG.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = "
      + jobsToSchedule.size() + " ; Failed to Submit = " + failedJobSubmissionCount
      + " ; Timed out = " + timedoutJobSubmissionCount);
  return failedJobSubmissionCount;
}
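The submit-then-collect pattern above is plain java.util.concurrent; here is a self-contained sketch of the same idea with a hypothetical task standing in for PhoenixMRJobCallable (the pool size and timeout are arbitrary choices, not Phoenix's values):

ExecutorService pool = Executors.newFixedThreadPool(4);
List<Future<Boolean>> futures = new ArrayList<>();
for (int i = 0; i < 8; i++) {
  futures.add(pool.submit(() -> Boolean.TRUE)); // stand-in for PhoenixMRJobCallable
}
int failed = 0, timedOut = 0;
for (Future<Boolean> f : futures) {
  try {
    f.get(5, TimeUnit.MINUTES); // block up to the per-job timeout
  } catch (InterruptedException | ExecutionException e) {
    failed++;
  } catch (TimeoutException e) {
    timedOut++;
  }
}
pool.shutdown();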
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project phoenix by apache.
The class PhoenixMRJobUtil, method getActiveResourceManagerHost.
public static String getActiveResourceManagerHost(Configuration config, String zkQuorum)
    throws IOException, InterruptedException, JSONException, KeeperException,
    InvalidProtocolBufferException, ZooKeeperConnectionException {
  ZooKeeperWatcher zkw = null;
  ZooKeeper zk = null;
  String activeRMHost = null;
  try {
    zkw = new ZooKeeperWatcher(config, "get-active-yarnmanager", null);
    zk = new ZooKeeper(zkQuorum, 30000, zkw, false);
    List<String> children = zk.getChildren(YARN_LEADER_ELECTION, zkw);
    for (String subEntry : children) {
      List<String> subChildren = zk.getChildren(YARN_LEADER_ELECTION + "/" + subEntry, zkw);
      for (String eachEntry : subChildren) {
        if (eachEntry.contains(ACTIVE_STANDBY_ELECTOR_LOCK)) {
          String path =
              YARN_LEADER_ELECTION + "/" + subEntry + "/" + ACTIVE_STANDBY_ELECTOR_LOCK;
          byte[] data = zk.getData(path, zkw, new Stat());
          ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data);
          LOG.info("Active RmId : " + proto.getRmId());
          activeRMHost = config.get(YarnConfiguration.RM_HOSTNAME + "." + proto.getRmId());
          LOG.info("activeResourceManagerHostname = " + activeRMHost);
        }
      }
    }
  } finally {
    if (zkw != null) {
      zkw.close();
    }
    if (zk != null) {
      zk.close();
    }
  }
  return activeRMHost;
}
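This lookup only applies to HA YARN deployments, where the ActiveStandbyElector stores a serialized ActiveRMInfoProto record under the leader-election znode. A hedged usage sketch; the quorum string is a placeholder, not a real endpoint:

Configuration conf = HBaseConfiguration.create();
// Placeholder quorum; in a real deployment this comes from yarn-site.xml / hbase-site.xml.
String activeRM = PhoenixMRJobUtil.getActiveResourceManagerHost(conf, "zk1.example.com:2181");
System.out.println("Active RM host: " + activeRM);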
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project phoenix by apache.
The class MutableIndexReplicationIT, method setupConfigsAndStartCluster.
private static void setupConfigsAndStartCluster() throws Exception {
  // cluster-1 lives at regular HBase home, so we don't need to change how phoenix handles
  // lookups
  // conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  setUpConfigForMiniCluster(conf1);
  // smaller log roll size to trigger more events
  conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to re-fetch conf1 in case the zk cluster location is different than the default
  conf1 = utility1.getConfiguration();
  zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
  admin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");
  // Base conf2 on conf1 so it gets the right zk cluster and general cluster configs
  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);
  zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
  // replicate from cluster 1 -> cluster 2, but not back again
  admin.addPeer("1", utility2.getClusterKey());
  LOG.info("Setup second Zk");
  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);
}
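The cluster key passed to addPeer encodes the peer's ZooKeeper quorum, client port, and znode parent; utility2.getClusterKey() computes it from the shared mini ZK cluster and the "/2" parent set above. A sketch with hypothetical values:

// Format assumed here: <zk quorum>:<client port>:<znode parent>
String clusterKey = "127.0.0.1:21818:/2"; // port is whatever the mini ZK cluster chose
admin.addPeer("1", clusterKey);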