Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class TestBackupBase, method setUpBeforeClass.
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  conf1 = TEST_UTIL.getConfiguration();
  conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
  BackupManager.decorateMasterConfiguration(conf1);
  BackupManager.decorateRegionServerConfiguration(conf1);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  // Set MultiWAL (with 2 default WAL files per RS)
  conf1.set(WALFactory.WAL_PROVIDER, provider);
  TEST_UTIL.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  TEST_UTIL2 = new HBaseTestingUtility(conf2);
  TEST_UTIL2.setZkCluster(miniZK);
  TEST_UTIL.startMiniCluster();
  TEST_UTIL2.startMiniCluster();
  conf1 = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniMapReduceCluster();
  BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT";
  LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
  BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
  LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
  createTables();
  populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
}
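The pattern worth noting here is that only the first utility starts a ZooKeeper ensemble; the second utility is handed the same MiniZooKeeperCluster and is isolated only by its znode parent. Below is a minimal sketch of just that wiring, using the same HBaseTestingUtility calls and imports as the snippet above (the variable names are hypothetical):

// Minimal sketch: two mini clusters sharing one embedded ZooKeeper ensemble.
Configuration confA = HBaseConfiguration.create();
confA.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");    // first cluster lives under /1
HBaseTestingUtility utilA = new HBaseTestingUtility(confA);
utilA.startMiniZKCluster();                             // the only ZK ensemble started
MiniZooKeeperCluster sharedZk = utilA.getZkCluster();

Configuration confB = HBaseConfiguration.create(confA);
confB.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");     // second cluster lives under /2
HBaseTestingUtility utilB = new HBaseTestingUtility(confB);
utilB.setZkCluster(sharedZk);                           // reuse ZK instead of starting another

utilA.startMiniCluster();
utilB.startMiniCluster();

Both mini clusters then coexist on the one embedded ZooKeeper, each under its own znode.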
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class TestHBaseTestingUtility, method testMiniZooKeeperWithMultipleClientPorts.
@Test
public void testMiniZooKeeperWithMultipleClientPorts() throws Exception {
  int defaultClientPort = 8888;
  int i, j;
  HBaseTestingUtility hbt = new HBaseTestingUtility();
  // Test 1 - set up zookeeper cluster with same number of ZK servers and specified client ports
  int[] clientPortList1 = { 1111, 1112, 1113 };
  MiniZooKeeperCluster cluster1 =
      hbt.startMiniZKCluster(clientPortList1.length, clientPortList1);
  try {
    List<Integer> clientPortListInCluster = cluster1.getClientPortList();
    for (i = 0; i < clientPortListInCluster.size(); i++) {
      assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList1[i]);
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 2 - set up zookeeper cluster with more ZK servers than specified client ports
  hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
  int[] clientPortList2 = { 2222, 2223 };
  MiniZooKeeperCluster cluster2 =
      hbt.startMiniZKCluster(clientPortList2.length + 2, clientPortList2);
  try {
    List<Integer> clientPortListInCluster = cluster2.getClientPortList();
    for (i = 0, j = 0; i < clientPortListInCluster.size(); i++) {
      if (i < clientPortList2.length) {
        assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList2[i]);
      } else {
        // servers with no specified client port will use defaultClientPort or some other ports
        // based on defaultClientPort
        assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
        j++;
      }
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 3 - set up zookeeper cluster with invalid client ports
  hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
  int[] clientPortList3 = { 3333, -3334, 3335, 0 };
  MiniZooKeeperCluster cluster3 =
      hbt.startMiniZKCluster(clientPortList3.length + 1, clientPortList3);
  try {
    List<Integer> clientPortListInCluster = cluster3.getClientPortList();
    for (i = 0, j = 0; i < clientPortListInCluster.size(); i++) {
      // the default port or a port based on default port will be used.
      if (i < clientPortList3.length && clientPortList3[i] > 0) {
        assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList3[i]);
      } else {
        assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
        j++;
      }
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 4 - set up zookeeper cluster where the default port and some other ports are already taken.
  // Since defaultClientPort and defaultClientPort+2 are among the specified ports, the algorithm
  // should choose defaultClientPort+1 and defaultClientPort+3 to fill out the ports for the
  // servers without a specified port.
  hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
  int[] clientPortList4 = { -4444, defaultClientPort + 2, 4446, defaultClientPort };
  MiniZooKeeperCluster cluster4 =
      hbt.startMiniZKCluster(clientPortList4.length + 1, clientPortList4);
  try {
    List<Integer> clientPortListInCluster = cluster4.getClientPortList();
    for (i = 0, j = 1; i < clientPortListInCluster.size(); i++) {
      // the default port or a port based on default port will be used.
      if (i < clientPortList4.length && clientPortList4[i] > 0) {
        assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList4[i]);
      } else {
        assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
        j += 2;
      }
    }
  } finally {
    hbt.shutdownMiniZKCluster();
  }
  // Test 5 - set up zookeeper cluster with duplicate client ports specified - failure is expected.
  int[] clientPortList5 = { 5555, 5556, 5556 };
  try {
    MiniZooKeeperCluster cluster5 =
        hbt.startMiniZKCluster(clientPortList5.length, clientPortList5);
    // expected failure
    assertTrue(cluster5.getClientPort() == -1);
  } catch (Exception e) {
    // exception is acceptable
  } finally {
    hbt.shutdownMiniZKCluster();
  }
}
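The comments in Tests 2-4 above describe how the client ports are filled in: explicitly listed positive ports are used as given, and servers without a valid port fall back to the configured default port or the next free values derived from it. The same behavior can be driven on MiniZooKeeperCluster directly rather than through HBaseTestingUtility; the following is a hedged sketch, assuming setDefaultClientPort, addClientPort, startup, getClientPortList, and shutdown behave as in the test above (the directory and port values are arbitrary):

// Sketch: four ZK servers, two with explicit ports, two filled from the default port.
MiniZooKeeperCluster zk = new MiniZooKeeperCluster(HBaseConfiguration.create());
zk.setDefaultClientPort(8888);            // fallback for servers without an explicit port
zk.addClientPort(2181);                   // first server: 2181
zk.addClientPort(2182);                   // second server: 2182
File baseDir = new File(System.getProperty("java.io.tmpdir"), "mini-zk");
zk.startup(baseDir, 4);                   // servers 3 and 4 should get 8888 and 8889
List<Integer> ports = zk.getClientPortList();
System.out.println(ports);                // expected along the lines of [2181, 2182, 8888, 8889]
zk.shutdown();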
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hive by apache.
The class TestZooKeeperTokenStore, method setUp.
@Override
protected void setUp() throws Exception {
  File zkDataDir = new File(System.getProperty("test.tmp.dir"));
  if (this.zkCluster != null) {
    throw new IOException("Cluster already running");
  }
  this.zkCluster = new MiniZooKeeperCluster();
  this.zkPort = this.zkCluster.startup(zkDataDir);
  this.zkClient = CuratorFrameworkFactory.builder().connectString("localhost:" + zkPort)
      .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
  this.zkClient.start();
}
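Only setUp is shown here; the matching tearDown is not part of the snippet. A minimal sketch, assuming the same zkClient/zkCluster fields (the actual test class may do more), would close the Curator client and then stop the mini cluster:

@Override
protected void tearDown() throws Exception {
  if (this.zkClient != null) {
    this.zkClient.close();        // close the Curator connection first
  }
  if (this.zkCluster != null) {
    this.zkCluster.shutdown();    // then stop the embedded ZooKeeper
    this.zkCluster = null;
  }
}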
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class TestReplicaWithCluster, method testReplicaAndReplication.
@SuppressWarnings("deprecation")
@Test
public void testReplicaAndReplication() throws Exception {
  TableDescriptorBuilder builder = HTU.createModifyableTableDescriptor("testReplicaAndReplication");
  builder.setRegionReplication(NB_SERVERS);
  builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(row)
      .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
  builder.setCoprocessor(SlowMeCopro.class.getName());
  TableDescriptor tableDescriptor = builder.build();
  HTU.getAdmin().createTable(tableDescriptor, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE);
  Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration());
  conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  MiniZooKeeperCluster miniZK = HTU.getZkCluster();
  HTU2 = new HBaseTestingUtil(conf2);
  HTU2.setZkCluster(miniZK);
  HTU2.startMiniCluster(NB_SERVERS);
  LOG.info("Setup second Zk");
  HTU2.getAdmin().createTable(tableDescriptor, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE);
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Admin admin = connection.getAdmin()) {
    ReplicationPeerConfig rpc =
        ReplicationPeerConfig.newBuilder().setClusterKey(HTU2.getClusterKey()).build();
    admin.addReplicationPeer("2", rpc);
  }
  Put p = new Put(row);
  p.addColumn(row, row, row);
  final Table table = HTU.getConnection().getTable(tableDescriptor.getTableName());
  table.put(p);
  HTU.getAdmin().flush(table.getName());
  LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");
  Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        SlowMeCopro.cdl.set(new CountDownLatch(1));
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table.get(g);
        Assert.assertTrue(r.isStale());
        return !r.isEmpty();
      } finally {
        SlowMeCopro.cdl.get().countDown();
        SlowMeCopro.sleepTime.set(0);
      }
    }
  });
  table.close();
  LOG.info("stale get on the first cluster done. Now for the second.");
  final Table table2 = HTU.getConnection().getTable(tableDescriptor.getTableName());
  Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        SlowMeCopro.cdl.set(new CountDownLatch(1));
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table2.get(g);
        Assert.assertTrue(r.isStale());
        return !r.isEmpty();
      } finally {
        SlowMeCopro.cdl.get().countDown();
        SlowMeCopro.sleepTime.set(0);
      }
    }
  });
  table2.close();
  HTU.getAdmin().disableTable(tableDescriptor.getTableName());
  HTU.deleteTable(tableDescriptor.getTableName());
  HTU2.getAdmin().disableTable(tableDescriptor.getTableName());
  HTU2.deleteTable(tableDescriptor.getTableName());
  // We shut down the HTU2 minicluster later, in afterClass(), as shutting down
  // the minicluster has the negative side effect of deleting all HConnections in the JVM.
}
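The afterClass() referenced in the closing comment is not included in this snippet. As a hedged sketch only, a teardown along these lines would stop the second cluster (and the first) once all tests have run, using the shared HTU/HTU2 fields from above:

@AfterClass
public static void afterClass() throws Exception {
  if (HTU2 != null) {
    HTU2.shutdownMiniCluster();   // second cluster first; it shares the first cluster's ZK
  }
  HTU.shutdownMiniCluster();
}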
Use of org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster in project hbase by apache.
The class TestVerifyReplicationCrossDiffHdfs, method setUpBeforeClass.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  util1 = new HBaseTestingUtil(conf1);
  util1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = util1.getZkCluster();
  conf1 = util1.getConfiguration();
  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  util2 = new HBaseTestingUtil(conf2);
  util2.setZkCluster(miniZK);
  util1.startMiniCluster();
  util2.startMiniCluster();
  createTestingTable(util1.getAdmin());
  createTestingTable(util2.getAdmin());
  addTestingPeer();
  LOG.info("Start to load some data to source cluster.");
  loadSomeData();
  LOG.info("Start mini MapReduce cluster.");
  mapReduceUtil.setZkCluster(miniZK);
  mapReduceUtil.startMiniMapReduceCluster();
}
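addTestingPeer() is a private helper of this test class and its body is not shown here. A plausible minimal version, mirroring the Admin-based peer registration used in the TestReplicaWithCluster snippet above (the peer id "1" is a placeholder, and the real helper may restrict the peer to specific tables or set other options):

private static void addTestingPeer() throws IOException {
  // Register the second cluster as a replication peer of the first (sketch only).
  ReplicationPeerConfig rpc =
      ReplicationPeerConfig.newBuilder().setClusterKey(util2.getClusterKey()).build();
  util1.getAdmin().addReplicationPeer("1", rpc);   // placeholder peer id
}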