Search in sources:

Example 11 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

The class TestAsyncAdminBase, method setUpBeforeClass.

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // Surface RPC errors in the log immediately and cap retries at 2 so
    // failures show up quickly, while keeping generous timeouts for the
    // mini cluster's slow startup.
    TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000);
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);
    // Bring up a mini cluster with two masters and two region servers,
    // then open the shared async connection used by the tests.
    StartTestingClusterOption clusterOption =
        StartTestingClusterOption.builder().numMasters(2).numRegionServers(2).build();
    TEST_UTIL.startMiniCluster(clusterOption);
    ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
}
Also used : StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) BeforeClass(org.junit.BeforeClass)

Example 12 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

The class TestSeparateClientZKCluster, method beforeAllTests.

@BeforeClass
public static void beforeAllTests() throws Exception {
    // Stand up a dedicated ZooKeeper ensemble on a fixed port for clients.
    final int zkClientPort = 21828;
    clientZkCluster = new MiniZooKeeperCluster(TEST_UTIL.getConfiguration());
    clientZkCluster.setDefaultClientPort(zkClientPort);
    clientZkCluster.startup(clientZkDir);
    // Fail fast: only 2 retries, log errors from the start, single ZK recovery retry.
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    TEST_UTIL.getConfiguration().setInt("hbase.client.start.log.errors.counter", -1);
    TEST_UTIL.getConfiguration().setInt("zookeeper.recovery.retry", 1);
    // Point client connections at the separate client ZK cluster via the
    // ZK-based connection registry.
    TEST_UTIL.getConfiguration().setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY,
        ZKConnectionRegistry.class, ConnectionRegistry.class);
    TEST_UTIL.getConfiguration().set(HConstants.CLIENT_ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
    TEST_UTIL.getConfiguration().setInt(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT, zkClientPort);
    // Shorten the ZK session timeout so session expiration is easy to trigger.
    TEST_UTIL.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, ZK_SESSION_TIMEOUT);
    // Cluster topology: 2 masters, 3 region servers, 3 data nodes.
    StartTestingClusterOption clusterOption = StartTestingClusterOption.builder()
        .numMasters(2)
        .numRegionServers(3)
        .numDataNodes(3)
        .build();
    TEST_UTIL.startMiniCluster(clusterOption);
}
Also used : MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) BeforeClass(org.junit.BeforeClass)

Example 13 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

The class TestUpdateConfiguration, method setup.

@BeforeClass
public static void setup() throws Exception {
    // Stage the override configuration files before the cluster comes up.
    setUpConfigurationFiles(TEST_UTIL);
    // Start a mini cluster with two masters.
    StartTestingClusterOption twoMasters =
        StartTestingClusterOption.builder().numMasters(2).build();
    TEST_UTIL.startMiniCluster(twoMasters);
    // Register the extra resource on the region server configuration.
    addResourceToRegionServerConfiguration(TEST_UTIL);
}
Also used : StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) BeforeClass(org.junit.BeforeClass)

Example 14 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

The class MasterFailoverWithProceduresTestBase, method setUp.

@BeforeClass
public static void setUp() throws Exception {
    // Run the master procedure executor with a single worker thread.
    UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
    // Two masters so a backup is available for failover scenarios.
    StartTestingClusterOption twoMasters =
        StartTestingClusterOption.builder().numMasters(2).build();
    UTIL.startMiniCluster(twoMasters);
    // Begin with both kill-before-store-update switches disabled; individual
    // tests flip them when they want to simulate a crash.
    final ProcedureExecutor<MasterProcedureEnv> executor = getMasterProcedureExecutor();
    ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(executor, false);
    ProcedureTestingUtility.setKillBeforeStoreUpdate(executor, false);
}
Also used : StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) BeforeClass(org.junit.BeforeClass)

Example 15 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

The class TestHRegion, method testgetHDFSBlocksDistribution.

@Test
public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    // Keep the default DFS block size: with hfilev3 tags, a smaller block size
    // would split each store file into pieces spread evenly across the three
    // data nodes, and the assertion below — that at least one host holds a
    // replica of every block — would no longer hold. (St.Ack)
    // final int DEFAULT_BLOCK_SIZE = 1024;
    // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    testUtil.getConfiguration().setInt("dfs.replication", 2);
    // Cluster topology: three named data-node hosts, three region servers.
    SingleProcessHBaseCluster miniCluster = null;
    String[] hosts = new String[] { "host1", "host2", "host3" };
    int numRegionServers = 3;
    try {
        StartTestingClusterOption clusterOption = StartTestingClusterOption.builder()
            .numRegionServers(numRegionServers).dataNodeHosts(hosts).build();
        miniCluster = testUtil.startMiniCluster(clusterOption);
        byte[][] columnFamilies = { fam1, fam2 };
        Table table = testUtil.createTable(tableName, columnFamilies);
        // Write one row touching both families, then flush so the region has
        // two HFiles on HDFS.
        byte[] row = Bytes.toBytes("row1");
        byte[] col = Bytes.toBytes("col1");
        Put mutation = new Put(row);
        mutation.addColumn(fam1, col, 1, Bytes.toBytes("test1"));
        mutation.addColumn(fam2, col, 1, Bytes.toBytes("test2"));
        table.put(mutation);
        HRegion region = testUtil.getHBaseCluster().getRegions(tableName).get(0);
        region.flush(true);
        HDFSBlocksDistribution distribution = region.getHDFSBlocksDistribution();
        // Replication factor 2 with 2 HFiles gives 4 block replicas on 3 data
        // nodes, so at least one host carries replicas of both HFiles; that
        // host's weight must equal the unique-blocks total weight.
        long uniqueBlocksWeight = distribution.getUniqueBlocksTotalWeight();
        StringBuilder hostWeights = new StringBuilder();
        for (String host : distribution.getTopHosts()) {
            if (hostWeights.length() > 0) {
                hostWeights.append(", ");
            }
            hostWeights.append(host).append("=").append(distribution.getWeight(host));
        }
        String topHost = distribution.getTopHosts().get(0);
        long topHostWeight = distribution.getWeight(topHost);
        String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight + ", topHostWeight="
            + topHostWeight + ", topHost=" + topHost + "; " + hostWeights.toString();
        LOG.info(msg);
        assertTrue(msg, uniqueBlocksWeight == topHostWeight);
        // The static computation (used by the load balancer and other
        // components) must agree with the instance-level result.
        HDFSBlocksDistribution recomputed = HRegion.computeHDFSBlocksDistribution(
            testUtil.getConfiguration(), region.getTableDescriptor(), region.getRegionInfo());
        long recomputedUniqueBlocksWeight = recomputed.getUniqueBlocksTotalWeight();
        assertTrue(uniqueBlocksWeight == recomputedUniqueBlocksWeight);
        table.close();
    } finally {
        if (miniCluster != null) {
            testUtil.shutdownMiniCluster();
        }
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Table(org.apache.hadoop.hbase.client.Table) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) Put(org.apache.hadoop.hbase.client.Put) StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) Test(org.junit.Test)

Aggregations

StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption)42 BeforeClass (org.junit.BeforeClass)21 HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil)13 Test (org.junit.Test)13 Configuration (org.apache.hadoop.conf.Configuration)10 SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster)8 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)5 TableName (org.apache.hadoop.hbase.TableName)5 Table (org.apache.hadoop.hbase.client.Table)5 MasterThread (org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread)5 ServerName (org.apache.hadoop.hbase.ServerName)4 RegionLocator (org.apache.hadoop.hbase.client.RegionLocator)4 Path (org.apache.hadoop.fs.Path)3 ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics)3 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)3 Before (org.junit.Before)3 InetAddress (java.net.InetAddress)2 NetworkInterface (java.net.NetworkInterface)2 ArrayList (java.util.ArrayList)2 HashMap (java.util.HashMap)2