
Example 16 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

The class TestRollingRestart, method testBasicRollingRestart:

@Test(timeout = 500000)
public void testBasicRollingRestart() throws Exception {
    // Start a cluster with 2 masters and 3 regionservers
    final int NUM_MASTERS = 2;
    final int NUM_RS = 3;
    final int NUM_REGIONS_TO_CREATE = 20;
    int expectedNumRS = 3;
    // Start the cluster
    log("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    log("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    // Create a table with regions
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] family = Bytes.toBytes("family");
    log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
    Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
    int numRegions = -1;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        numRegions = r.getStartKeys().length;
    }
    // plus the hbase:meta catalog region
    numRegions += 1;
    log("Waiting for no more RIT\n");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Disabling table\n");
    TEST_UTIL.getAdmin().disableTable(tableName);
    log("Waiting for no more RIT\n");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    log("Verifying only catalog and namespace regions are assigned\n");
    if (regions.size() != 2) {
        for (String oregion : regions) log("Region still online: " + oregion);
    }
    assertEquals(2, regions.size());
    log("Enabling table\n");
    TEST_UTIL.getAdmin().enableTable(tableName);
    log("Waiting for no more RIT\n");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster\n");
    regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertRegionsAssigned(cluster, regions);
    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
    // Add a new regionserver
    log("Adding a fourth RS");
    RegionServerThread restarted = cluster.startRegionServer();
    expectedNumRS++;
    restarted.waitForServerOnline();
    log("Additional RS is online");
    log("Waiting for no more RIT");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster");
    assertRegionsAssigned(cluster, regions);
    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
    // Master Restarts
    List<MasterThread> masterThreads = cluster.getMasterThreads();
    MasterThread activeMaster = null;
    MasterThread backupMaster = null;
    assertEquals(2, masterThreads.size());
    if (masterThreads.get(0).getMaster().isActiveMaster()) {
        activeMaster = masterThreads.get(0);
        backupMaster = masterThreads.get(1);
    } else {
        activeMaster = masterThreads.get(1);
        backupMaster = masterThreads.get(0);
    }
    // Bring down the backup master
    log("Stopping backup master\n\n");
    backupMaster.getMaster().stop("Stop of backup during rolling restart");
    cluster.hbaseCluster.waitOnMaster(backupMaster);
    // Bring down the primary master
    log("Stopping primary master\n\n");
    activeMaster.getMaster().stop("Stop of active during rolling restart");
    cluster.hbaseCluster.waitOnMaster(activeMaster);
    // Start primary master
    log("Restarting primary master\n\n");
    activeMaster = cluster.startMaster();
    cluster.waitForActiveAndReadyMaster();
    // Start backup master
    log("Restarting backup master\n\n");
    backupMaster = cluster.startMaster();
    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
    // RegionServer Restarts
    // Bring them down, one at a time, waiting between each to complete
    List<RegionServerThread> regionServers = cluster.getLiveRegionServerThreads();
    int num = 1;
    int total = regionServers.size();
    for (RegionServerThread rst : regionServers) {
        ServerName serverName = rst.getRegionServer().getServerName();
        log("Stopping region server " + num + " of " + total + " [ " + serverName + "]");
        rst.getRegionServer().stop("Stopping RS during rolling restart");
        cluster.hbaseCluster.waitOnRegionServer(rst);
        log("Waiting for RS shutdown to be handled by master");
        waitForRSShutdownToStartAndFinish(activeMaster, serverName);
        log("RS shutdown done, waiting for no more RIT");
        TEST_UTIL.waitUntilNoRegionsInTransition(60000);
        log("Verifying there are " + numRegions + " assigned on cluster");
        assertRegionsAssigned(cluster, regions);
        expectedNumRS--;
        assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
        log("Restarting region server " + num + " of " + total);
        restarted = cluster.startRegionServer();
        restarted.waitForServerOnline();
        expectedNumRS++;
        log("Region server " + num + " is back online");
        log("Waiting for no more RIT");
        TEST_UTIL.waitUntilNoRegionsInTransition(60000);
        log("Verifying there are " + numRegions + " assigned on cluster");
        assertRegionsAssigned(cluster, regions);
        assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
        num++;
    }
    Thread.sleep(1000);
    assertRegionsAssigned(cluster, regions);
    // TODO: Bring random 3 of 4 RS down at the same time
    ht.close();
    // Stop the cluster
    TEST_UTIL.shutdownMiniCluster();
}
Also used: RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), Table(org.apache.hadoop.hbase.client.Table), Configuration(org.apache.hadoop.conf.Configuration), HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration), MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster), TableName(org.apache.hadoop.hbase.TableName), HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread), ServerName(org.apache.hadoop.hbase.ServerName), RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread), Test(org.junit.Test)
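
The test above leans on helpers that are not shown, notably assertRegionsAssigned and waitForRSShutdownToStartAndFinish. As a rough illustration only, a check in the spirit of assertRegionsAssigned could compare the regions currently online in the mini cluster against the set captured earlier; the class name and body below are hypothetical, and the real helper in TestRollingRestart may differ (for example, it may retry before failing).

import java.util.NavigableSet;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import static org.junit.Assert.assertEquals;

public class RegionAssignmentCheck {
    // Hypothetical helper, not the actual TestRollingRestart implementation.
    static void assertRegionsAssigned(MiniHBaseCluster cluster, NavigableSet<String> expected) throws Exception {
        // getAllOnlineRegions is the same static utility the test itself uses above.
        NavigableSet<String> online = HBaseTestingUtility.getAllOnlineRegions(cluster);
        assertEquals("Unexpected number of assigned regions", expected.size(), online.size());
    }
}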

Example 17 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

The class TestMasterShutdown, method testMasterShutdown:

/**
   * Simple test of shutdown.
   * <p>
   * Starts with three masters. Tells the active master to shut down the cluster.
   * Verifies that all masters are properly shut down.
   * @throws Exception
   */
@Test(timeout = 120000)
public void testMasterShutdown() throws Exception {
    final int NUM_MASTERS = 3;
    final int NUM_RS = 3;
    // Create config to use for this cluster
    Configuration conf = HBaseConfiguration.create();
    // Start the cluster
    HBaseTestingUtility htu = new HBaseTestingUtility(conf);
    htu.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = htu.getHBaseCluster();
    // get all the master threads
    List<MasterThread> masterThreads = cluster.getMasterThreads();
    // wait for each to come online
    for (MasterThread mt : masterThreads) {
        assertTrue(mt.isAlive());
    }
    // find the active master
    HMaster active = null;
    for (int i = 0; i < masterThreads.size(); i++) {
        if (masterThreads.get(i).getMaster().isActiveMaster()) {
            active = masterThreads.get(i).getMaster();
            break;
        }
    }
    assertNotNull(active);
    // make sure the other two are backup masters
    ClusterStatus status = active.getClusterStatus();
    assertEquals(2, status.getBackupMastersSize());
    assertEquals(2, status.getBackupMasters().size());
    // tell the active master to shutdown the cluster
    active.shutdown();
    for (int i = NUM_MASTERS - 1; i >= 0; --i) {
        cluster.waitOnMaster(i);
    }
    // make sure all the masters properly shut down
    assertEquals(0, masterThreads.size());
    htu.shutdownMiniCluster();
}
Also used: HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration), Configuration(org.apache.hadoop.conf.Configuration), HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread), MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster), ClusterStatus(org.apache.hadoop.hbase.ClusterStatus), Test(org.junit.Test)
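
A hedged variation on the active/backup bookkeeping above: instead of walking the MasterThread list, a test can ask the mini cluster for its active master and read the backup count from ClusterStatus. The sketch reuses getClusterStatus and getBackupMastersSize from the example; the class name is made up, and getMaster() on the mini cluster is assumed to return the active HMaster on this HBase line.

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.master.HMaster;

public class BackupMasterCount {
    static int countBackupMasters(HBaseTestingUtility htu) throws Exception {
        // Assumed: getMaster() returns the currently active master of the mini cluster.
        HMaster active = htu.getHBaseCluster().getMaster();
        ClusterStatus status = active.getClusterStatus();   // same call the test uses
        return status.getBackupMastersSize();
    }
}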

Example 18 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

The class TestMasterShutdown, method testMasterShutdownBeforeStartingAnyRegionServer:

@Test(timeout = 60000)
public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception {
    final int NUM_MASTERS = 1;
    final int NUM_RS = 0;
    // Create config to use for this cluster
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.ipc.client.failed.servers.expiry", 200);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
    // Start the cluster
    final HBaseTestingUtility util = new HBaseTestingUtility(conf);
    util.startMiniDFSCluster(3);
    util.startMiniZKCluster();
    util.createRootDir();
    final LocalHBaseCluster cluster = new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class, MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
    final int MASTER_INDEX = 0;
    final MasterThread master = cluster.getMasters().get(MASTER_INDEX);
    master.start();
    LOG.info("Called master start on " + master.getName());
    Thread shutdownThread = new Thread("Shutdown-Thread") {

        @Override
        public void run() {
            LOG.info("Before call to shutdown master");
            try {
                try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
                    try (Admin admin = connection.getAdmin()) {
                        admin.shutdown();
                    }
                }
                LOG.info("After call to shutdown master");
                cluster.waitOnMaster(MASTER_INDEX);
            } catch (Exception e) {
                // Expected while the cluster is shutting down; failures here are ignored on purpose.
            }
        }
    };
    shutdownThread.start();
    LOG.info("Called master join on " + master.getName());
    master.join();
    shutdownThread.join();
    List<MasterThread> masterThreads = cluster.getMasters();
    // make sure all the masters properly shut down
    assertEquals(0, masterThreads.size());
    util.shutdownMiniZKCluster();
    util.shutdownMiniDFSCluster();
    util.cleanupTestDir();
}
Also used: HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration), Configuration(org.apache.hadoop.conf.Configuration), HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread), Connection(org.apache.hadoop.hbase.client.Connection), MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster), LocalHBaseCluster(org.apache.hadoop.hbase.LocalHBaseCluster), Admin(org.apache.hadoop.hbase.client.Admin), Test(org.junit.Test)
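
Example 18 wires up DFS, ZooKeeper, and a LocalHBaseCluster by hand because it needs the master running before any region server exists. When that ordering does not matter, the one-call lifecycle used in the earlier examples is simpler; the sketch below (class and main method are made up) only restates calls already shown above and is an illustration, not a replacement for this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class OneCallMiniCluster {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseTestingUtility util = new HBaseTestingUtility(conf);
        // Starts DFS, ZooKeeper, one master, and one region server in a single call.
        util.startMiniCluster(1, 1);
        try {
            // ... exercise the cluster here ...
        } finally {
            // Tears the whole mini cluster down again.
            util.shutdownMiniCluster();
        }
    }
}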

Example 19 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

The class TestReplicationBase, method setUpBeforeClass:

/**
   * @throws java.lang.Exception
   */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // We don't want too many edits per batch sent to the ReplicationEndpoint to trigger
    // sufficient number of events. But we don't want to go too low because
    // HBaseInterClusterReplicationEndpoint partitions entries into batches and we want
    // more than one batch sent to the peer cluster for better testing.
    conf1.setInt("replication.source.size.capacity", 102400);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf1.setLong("replication.sleep.before.failover", 2000);
    conf1.setInt("replication.source.maxretriesmultiplier", 10);
    conf1.setFloat("replication.source.ratio", 1.0f);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to reget conf1 in case zk cluster location different
    // than default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");
    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
    LOG.info("Setup second Zk");
    CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
    utility1.startMiniCluster(2);
    // Have a bunch of slave servers, because inter-cluster shipping logic uses number of sinks
    // as a component in deciding maximum number of parallel batches to send to the peer cluster.
    utility2.startMiniCluster(4);
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey(utility2.getClusterKey());
    hbaseAdmin = ConnectionFactory.createConnection(conf1).getAdmin();
    hbaseAdmin.addReplicationPeer("2", rpc);
    HTableDescriptor table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setMaxVersions(100);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
    scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HColumnDescriptor f : table.getColumnFamilies()) {
        scopes.put(f.getName(), f.getScope());
    }
    Connection connection1 = ConnectionFactory.createConnection(conf1);
    Connection connection2 = ConnectionFactory.createConnection(conf2);
    try (Admin admin1 = connection1.getAdmin()) {
        admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    try (Admin admin2 = connection2.getAdmin()) {
        admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    utility1.waitUntilAllRegionsAssigned(tableName);
    utility2.waitUntilAllRegionsAssigned(tableName);
    htable1 = connection1.getTable(tableName);
    htable1.setWriteBufferSize(1024);
    htable2 = connection2.getTable(tableName);
}
Also used: HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin), Connection(org.apache.hadoop.hbase.client.Connection), MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster), Admin(org.apache.hadoop.hbase.client.Admin), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), BeforeClass(org.junit.BeforeClass)
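
After this setup, subclasses of TestReplicationBase typically write to htable1 and expect the edits to appear in htable2 via the peer added above. The following is a hedged, self-contained sketch of such a check; the class name, row, qualifier, and value are made up, and the retry loop is an illustration rather than the project's actual verification logic.

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.fail;

public class ReplicationSmokeCheck {
    // source/sink would be htable1/htable2; famName is the replicated family from the setup above.
    static void assertReplicated(Table source, Table sink, byte[] famName) throws Exception {
        byte[] row = Bytes.toBytes("row-1");       // hypothetical row key
        byte[] qualifier = Bytes.toBytes("q");     // hypothetical qualifier
        byte[] value = Bytes.toBytes("v");         // hypothetical value
        source.put(new Put(row).addColumn(famName, qualifier, value));
        // Replication is asynchronous, so poll the sink cluster for a bounded time.
        for (int i = 0; i < 30; i++) {
            Result result = sink.get(new Get(row));
            if (!result.isEmpty()) {
                assertArrayEquals(value, result.getValue(famName, qualifier));
                return;
            }
            Thread.sleep(500);
        }
        fail("Edit was not replicated to the peer cluster in time");
    }
}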

Example 20 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

The class TestFlushWithThroughputController, method setUp:

@Before
public void setUp() {
    hbtu = new HBaseTestingUtility();
    tableName = TableName.valueOf("Table-" + testName.getMethodName());
    hbtu.getConfiguration().set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, PressureAwareFlushThroughputController.class.getName());
}
Also used: HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), Before(org.junit.Before)
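
setUp only wires the PressureAwareFlushThroughputController into the configuration; a test still has to start the mini cluster, write enough data, and trigger a flush so the controller is actually exercised. The sketch below illustrates that shape under stated assumptions (the class and family names are made up, and createTable and Admin.flush are assumed to be available on this HBase line); it is not the real TestFlushWithThroughputController test body.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushThroughputSketch {
    static void writeAndFlush(HBaseTestingUtility hbtu, TableName tableName) throws Exception {
        hbtu.startMiniCluster(1);
        try {
            byte[] family = Bytes.toBytes("f");    // hypothetical column family
            Table table = hbtu.createTable(tableName, family);
            for (int i = 0; i < 1000; i++) {
                table.put(new Put(Bytes.toBytes("row-" + i))
                        .addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value-" + i)));
            }
            // Flushing the memstore is what goes through the configured throughput controller.
            hbtu.getAdmin().flush(tableName);
        } finally {
            hbtu.shutdownMiniCluster();
        }
    }
}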

Aggregations

HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 136 usages
Configuration (org.apache.hadoop.conf.Configuration): 50 usages
BeforeClass (org.junit.BeforeClass): 49 usages
Test (org.junit.Test): 42 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 35 usages
Path (org.apache.hadoop.fs.Path): 29 usages
Admin (org.apache.hadoop.hbase.client.Admin): 24 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 22 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 20 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 18 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16 usages
Before (org.junit.Before): 14 usages
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 11 usages
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 11 usages
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 10 usages
Table (org.apache.hadoop.hbase.client.Table): 8 usages
HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem): 8 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 7 usages
Result (org.apache.hadoop.hbase.client.Result): 7 usages