Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class HMasterCommandLine, method startMaster.
private int startMaster() {
  Configuration conf = getConf();
  try {
    // If 'local', defer to LocalHBaseCluster instance. Starts master
    // and regionserver both in the one JVM.
    if (LocalHBaseCluster.isLocal(conf)) {
      DefaultMetricsSystem.setMiniClusterMode(true);
      final MiniZooKeeperCluster zooKeeperCluster = new MiniZooKeeperCluster(conf);
      File zkDataPath = new File(conf.get(HConstants.ZOOKEEPER_DATA_DIR));
      // find out the default client port
      int zkClientPort = 0;
      // If the zookeeper client port is specified in server quorum, use it.
      String zkserver = conf.get(HConstants.ZOOKEEPER_QUORUM);
      if (zkserver != null) {
        String[] zkservers = zkserver.split(",");
        if (zkservers.length > 1) {
          // In local mode deployment, we have the master + a region server and zookeeper server
          // started in the same process. Therefore, we only support one zookeeper server.
          String errorMsg = "Could not start ZK with " + zkservers.length
            + " ZK servers in local mode deployment. Aborting as clients (e.g. shell) will not "
            + "be able to find this ZK quorum.";
          System.err.println(errorMsg);
          throw new IOException(errorMsg);
        }
        String[] parts = zkservers[0].split(":");
        if (parts.length == 2) {
          // the second part is the client port
          zkClientPort = Integer.parseInt(parts[1]);
        }
      }
      // If the client port could not be found in the server quorum conf, try another conf
      if (zkClientPort == 0) {
        zkClientPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 0);
        // The client port has to be set by now; if not, throw exception.
        if (zkClientPort == 0) {
          throw new IOException("No config value for " + HConstants.ZOOKEEPER_CLIENT_PORT);
        }
      }
      zooKeeperCluster.setDefaultClientPort(zkClientPort);
      // set the ZK tick time if specified
      int zkTickTime = conf.getInt(HConstants.ZOOKEEPER_TICK_TIME, 0);
      if (zkTickTime > 0) {
        zooKeeperCluster.setTickTime(zkTickTime);
      }
      // login the zookeeper server principal (if using security)
      ZKAuthentication.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE,
        HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, null);
      int localZKClusterSessionTimeout =
        conf.getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster", 10 * 1000);
      conf.setInt(HConstants.ZK_SESSION_TIMEOUT, localZKClusterSessionTimeout);
      LOG.info("Starting a zookeeper cluster");
      int clientPort = zooKeeperCluster.startup(zkDataPath);
      if (clientPort != zkClientPort) {
        String errorMsg = "Could not start ZK at requested port of " + zkClientPort
          + ". ZK was started at port: " + clientPort
          + ". Aborting as clients (e.g. shell) will not be able to find this ZK quorum.";
        System.err.println(errorMsg);
        throw new IOException(errorMsg);
      }
      conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
      // Need to have the zk cluster shutdown when master is shutdown.
      // Run a subclass that does the zk cluster shutdown on its way out.
      int mastersCount = conf.getInt("hbase.masters", 1);
      int regionServersCount = conf.getInt("hbase.regionservers", 1);
      // Set start timeout to 5 minutes for cmd line start operations
      conf.setIfUnset("hbase.master.start.timeout.localHBaseCluster", "300000");
      LOG.info("Starting up instance of localHBaseCluster; master=" + mastersCount
        + ", regionserversCount=" + regionServersCount);
      LocalHBaseCluster cluster = new LocalHBaseCluster(conf, mastersCount, regionServersCount,
        LocalHMaster.class, HRegionServer.class);
      ((LocalHMaster) cluster.getMaster(0)).setZKCluster(zooKeeperCluster);
      cluster.startup();
      waitOnMasterThreads(cluster);
    } else {
      logProcessInfo(getConf());
      HMaster master = HMaster.constructMaster(masterClass, conf);
      if (master.isStopped()) {
        LOG.info("Won't bring the Master up as a shutdown is requested");
        return 1;
      }
      master.start();
      master.join();
      if (master.isAborted()) {
        throw new RuntimeException("HMaster Aborted");
      }
    }
  } catch (Throwable t) {
    LOG.error("Master exiting", t);
    return 1;
  }
  return 0;
}
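The block above resolves the ZooKeeper client port in a fixed order: a port embedded in a single hbase.zookeeper.quorum entry wins, otherwise hbase.zookeeper.property.clientPort is used, and if neither yields a port the startup aborts. A minimal standalone sketch of that ordering, using a hypothetical resolveZkClientPort helper that is not part of HBase:

import java.io.IOException;

public final class ZkClientPortResolution {
  // Hypothetical helper mirroring the port-resolution order in startMaster() above.
  static int resolveZkClientPort(String quorum, int configuredClientPort) throws IOException {
    if (quorum != null) {
      String[] servers = quorum.split(",");
      if (servers.length > 1) {
        // Local mode runs master, region server and ZK in one JVM, so only one ZK server fits.
        throw new IOException("Local mode supports a single ZK server, got " + servers.length);
      }
      String[] parts = servers[0].split(":");
      if (parts.length == 2) {
        return Integer.parseInt(parts[1]); // port taken from the "host:port" quorum entry
      }
    }
    if (configuredClientPort == 0) {
      throw new IOException("No ZooKeeper client port configured");
    }
    return configuredClientPort; // fallback: hbase.zookeeper.property.clientPort
  }

  public static void main(String[] args) throws IOException {
    System.out.println(resolveZkClientPort("localhost:2181", 0)); // 2181, from the quorum entry
    System.out.println(resolveZkClientPort("localhost", 2182));   // 2182, from the fallback value
  }
}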
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class TestHBaseTestingUtil, method testMiniZooKeeperWithMultipleClientPorts.
@Test
public void testMiniZooKeeperWithMultipleClientPorts() throws Exception {
  int defaultClientPort = 8888;
  int i, j;
  HBaseTestingUtil hbt = new HBaseTestingUtil();
  // Test 1 - set up zookeeper cluster with the same number of ZK servers and specified client ports
  int[] clientPortList1 = { 1111, 1112, 1113 };
  MiniZooKeeperCluster cluster1 = hbt.startMiniZKCluster(clientPortList1.length, clientPortList1);
  try {
    List<Integer> clientPortListInCluster = cluster1.getClientPortList();
    for (i = 0; i < clientPortListInCluster.size(); i++) {
      // We cannot assert the exact port: on a port conflict the cluster keeps trying the next
      // higher port (+1) until it finds a free one. The same applies below.
      assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList1[i]);
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 2 - set up zookeeper cluster with more ZK servers than specified client ports
  hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
  int[] clientPortList2 = { 2222, 2223 };
  MiniZooKeeperCluster cluster2 =
    hbt.startMiniZKCluster(clientPortList2.length + 2, clientPortList2);
  try {
    List<Integer> clientPortListInCluster = cluster2.getClientPortList();
    for (i = 0, j = 0; i < clientPortListInCluster.size(); i++) {
      if (i < clientPortList2.length) {
        assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList2[i]);
      } else {
        // Servers with no specified client port use defaultClientPort or a port derived
        // from defaultClientPort
        assertTrue(clientPortListInCluster.get(i).intValue() >= defaultClientPort + j);
        j++;
      }
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 3 - set up zookeeper cluster with invalid client ports
  hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
  int[] clientPortList3 = { 3333, -3334, 3335, 0 };
  MiniZooKeeperCluster cluster3 =
    hbt.startMiniZKCluster(clientPortList3.length + 1, clientPortList3);
  try {
    List<Integer> clientPortListInCluster = cluster3.getClientPortList();
    for (i = 0, j = 0; i < clientPortListInCluster.size(); i++) {
      // For invalid or missing ports, the default port or a port derived from it is used.
      if (i < clientPortList3.length && clientPortList3[i] > 0) {
        assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList3[i]);
      } else {
        assertTrue(clientPortListInCluster.get(i).intValue() >= defaultClientPort + j);
        j++;
      }
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 4 - set up zookeeper cluster with the default port and some other ports already claimed.
  // Since defaultClientPort and defaultClientPort + 2 are specified explicitly, the algorithm
  // should choose defaultClientPort + 1 and defaultClientPort + 3 to fill out the ports for the
  // servers without ports specified.
  hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
  int[] clientPortList4 = { -4444, defaultClientPort + 2, 4446, defaultClientPort };
  MiniZooKeeperCluster cluster4 =
    hbt.startMiniZKCluster(clientPortList4.length + 1, clientPortList4);
  try {
    List<Integer> clientPortListInCluster = cluster4.getClientPortList();
    for (i = 0, j = 1; i < clientPortListInCluster.size(); i++) {
      // For invalid or missing ports, the default port or a port derived from it is used.
      if (i < clientPortList4.length && clientPortList4[i] > 0) {
        assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList4[i]);
      } else {
        assertTrue(clientPortListInCluster.get(i).intValue() >= defaultClientPort + j);
        j += 2;
      }
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 5 - set up zookeeper cluster with the same port specified twice - failure is expected.
  int[] clientPortList5 = { 5555, 5556, 5556 };
  try {
    MiniZooKeeperCluster cluster5 =
      hbt.startMiniZKCluster(clientPortList5.length, clientPortList5);
    // expected failure
    assertTrue(cluster5.getClientPort() == -1);
  } catch (Exception e) {
    // exception is acceptable
  } finally {
    hbt.shutdownMiniZKCluster();
  }
}
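The utility methods above delegate to MiniZooKeeperCluster. For comparison, here is a minimal sketch of driving the cluster directly, using only calls that already appear in these examples (setDefaultClientPort, addClientPort, startup, getClientPortList, shutdown); the base directory and port numbers are arbitrary choices for illustration:

import java.io.File;
import java.nio.file.Files;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

public class MiniZkDirectUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster(conf);
    zkCluster.setDefaultClientPort(21810); // used for servers without an explicitly added port
    zkCluster.addClientPort(21811);        // explicit port for the first server
    File baseDir = Files.createTempDirectory("minizk").toFile();
    try {
      int activePort = zkCluster.startup(baseDir, 2); // start two ZK servers
      System.out.println("Active client port: " + activePort);
      System.out.println("All client ports: " + zkCluster.getClientPortList());
    } finally {
      zkCluster.shutdown();
    }
  }
}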
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class HBaseZKTestingUtility, method startMiniZKCluster.
/**
 * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set,
 * the port mentioned is used as the default client port for ZooKeeper.
 */
private MiniZooKeeperCluster startMiniZKCluster(File dir, int zooKeeperServerNum,
  int[] clientPortList) throws Exception {
  if (this.zkCluster != null) {
    throw new IOException("Cluster already running at " + dir);
  }
  this.passedZkCluster = false;
  this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
  int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
  if (defPort > 0) {
    // If there is a port in the config file, we use it.
    this.zkCluster.setDefaultClientPort(defPort);
  }
  if (clientPortList != null) {
    // Ignore extra client ports
    int clientPortListSize = Math.min(clientPortList.length, zooKeeperServerNum);
    for (int i = 0; i < clientPortListSize; i++) {
      this.zkCluster.addClientPort(clientPortList[i]);
    }
  }
  int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
  this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
  return this.zkCluster;
}
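A short sketch of the default-port path described in the Javadoc, as it might be exercised through the public API of the test utility; the port 21818 is an arbitrary choice, and the package locations assumed in the imports may differ between HBase versions:

import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

public class DefaultClientPortExample {
  public static void main(String[] args) throws Exception {
    HBaseZKTestingUtility util = new HBaseZKTestingUtility();
    // Seed the default client port before the mini ZK cluster starts.
    util.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", 21818);
    MiniZooKeeperCluster zk = util.startMiniZKCluster(); // single ZK server
    System.out.println("ZK reports client port: " + zk.getClientPort());
    // startMiniZKCluster writes the port that was actually bound back into the configuration.
    int boundPort = util.getConfiguration().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1);
    System.out.println("Configuration now holds: " + boundPort); // 21818 unless the port was busy
    util.shutdownMiniZKCluster();
  }
}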
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class TestNamespaceReplicationWithBulkLoadedData, method testBulkLoadReplicationActiveActive.
@Test
@Override
public void testBulkLoadReplicationActiveActive() throws Exception {
  Table peer1TestTable = UTIL1.getConnection().getTable(TestReplicationBase.tableName);
  Table peer2TestTable = UTIL2.getConnection().getTable(TestReplicationBase.tableName);
  Table peer3TestTable = UTIL3.getConnection().getTable(TestReplicationBase.tableName);
  Table notPeerTable = UTIL4.getConnection().getTable(TestReplicationBase.tableName);
  Table ns1Table = UTIL4.getConnection().getTable(NS1_TABLE);
  Table ns2Table = UTIL4.getConnection().getTable(NS2_TABLE);

  // case 1: the ns1 tables will be replicated to cluster4
  byte[] row = Bytes.toBytes("002_ns_peer");
  byte[] value = Bytes.toBytes("v2");
  bulkLoadOnCluster(ns1Table.getName(), row, value, UTIL1);
  waitForReplication(ns1Table, 1, NB_RETRIES);
  assertTableHasValue(ns1Table, row, value);

  // case 2: ns2:t2_syncup will be replicated to cluster4.
  // Without the fix for HBASE-23098, ns_peer1's hfile-refs (in ZK) would back up.
  row = Bytes.toBytes("003_ns_table_peer");
  value = Bytes.toBytes("v2");
  bulkLoadOnCluster(ns2Table.getName(), row, value, UTIL1);
  waitForReplication(ns2Table, 1, NB_RETRIES);
  assertTableHasValue(ns2Table, row, value);

  // case 3: the test table will be replicated to cluster1, cluster2 and cluster3,
  // but not to cluster4, because no other peer is set for that table.
  row = Bytes.toBytes("001_nopeer");
  value = Bytes.toBytes("v1");
  assertBulkLoadConditions(tableName, row, value, UTIL1, peer1TestTable, peer2TestTable,
    peer3TestTable);
  // 1 -> 4, table is empty
  assertTableNoValue(notPeerTable, row, value);

  // Verify hfile-refs for 1:ns_peer1; expect it to be empty
  MiniZooKeeperCluster zkCluster = UTIL1.getZkCluster();
  ZKWatcher watcher = new ZKWatcher(UTIL1.getConfiguration(), "TestZnodeHFiles-refs", null);
  RecoverableZooKeeper zk = RecoverableZooKeeper.connect(UTIL1.getConfiguration(), watcher);
  ZKReplicationQueueStorage replicationQueueStorage =
    new ZKReplicationQueueStorage(watcher, UTIL1.getConfiguration());
  Set<String> hfiles = replicationQueueStorage.getAllHFileRefs();
  assertTrue(hfiles.isEmpty());
}
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class TestReplicationEditsDroppedWithDroppedTable, method setUpBeforeClass.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Set to true to filter out replication edits for dropped tables
  conf1.setBoolean(HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_TABLE_KEY,
    true);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.nb.capacity", 1);
  utility1 = new HBaseTestingUtil(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  conf1 = utility1.getConfiguration();
  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  utility2 = new HBaseTestingUtil(conf2);
  utility2.setZkCluster(miniZK);
  utility1.startMiniCluster(1);
  utility2.startMiniCluster(1);
  admin1 = utility1.getAdmin();
  admin2 = utility2.getAdmin();
  NamespaceDescriptor nsDesc = NamespaceDescriptor.create(namespace).build();
  admin1.createNamespace(nsDesc);
  admin2.createNamespace(nsDesc);
}
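The setup above is the usual shape for replication tests: one mini ZK cluster hosts both mini HBase clusters, kept apart by distinct zookeeper.znode.parent values. A stripped-down sketch of just that sharing pattern, assuming two HBaseTestingUtil instances in the same JVM and the import paths shown:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;

public class SharedZkClusters {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil utilA = new HBaseTestingUtil();
    utilA.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/clusterA");
    utilA.startMiniZKCluster();

    HBaseTestingUtil utilB = new HBaseTestingUtil();
    utilB.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/clusterB");
    utilB.setZkCluster(utilA.getZkCluster()); // reuse the same MiniZooKeeperCluster

    utilA.startMiniCluster(1); // each utility starts its own mini HBase cluster
    utilB.startMiniCluster(1);

    utilB.shutdownMiniCluster();
    utilA.shutdownMiniCluster(); // also tears down the mini ZK cluster it started
  }
}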