Search in sources:

Example 6 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestMasterShutdown method testMasterShutdown.

/**
 * Simple test of shutdown.
 * <p>
 * Starts with three masters. Tells the active master to shut down the cluster.
 * Verifies that all masters are properly shut down.
 */
@Test
public void testMasterShutdown() throws Exception {
    // Create config to use for this cluster
    Configuration conf = HBaseConfiguration.create();
    // Start the cluster
    try {
        htu = new HBaseTestingUtil(conf);
        StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(3).numRegionServers(1).numDataNodes(1).build();
        final SingleProcessHBaseCluster cluster = htu.startMiniCluster(option);
    // Wait for all master threads to spawn and start their run loops.
        final long thirtySeconds = TimeUnit.SECONDS.toMillis(30);
        final long oneSecond = TimeUnit.SECONDS.toMillis(1);
        assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond, () -> {
            final List<MasterThread> masterThreads = cluster.getMasterThreads();
            return masterThreads != null && masterThreads.size() >= 3 && masterThreads.stream().allMatch(Thread::isAlive);
        }));
        // find the active master
        final HMaster active = cluster.getMaster();
        assertNotNull(active);
        // make sure the other two are backup masters
        ClusterMetrics status = active.getClusterMetrics();
        assertEquals(2, status.getBackupMasterNames().size());
        // tell the active master to shutdown the cluster
        active.shutdown();
        assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond, () -> CollectionUtils.isEmpty(cluster.getLiveMasterThreads())));
        assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond, () -> CollectionUtils.isEmpty(cluster.getLiveRegionServerThreads())));
    } finally {
        if (htu != null) {
            htu.shutdownMiniCluster();
            htu = null;
        }
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) List(java.util.List) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) Test(org.junit.Test)
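For orientation, the lifecycle behind this test is small. Below is a minimal, hedged sketch assembled only from the APIs shown above; the method name, cluster sizes, and comments are illustrative, not from the project.

@Test
public void testClusterLifecycleSketch() throws Exception {
    // Same imports as the example above (HBaseTestingUtil, HBaseConfiguration,
    // StartTestingClusterOption, SingleProcessHBaseCluster).
    HBaseTestingUtil htu = new HBaseTestingUtil(HBaseConfiguration.create());
    try {
        StartTestingClusterOption option = StartTestingClusterOption.builder()
            // One active master plus two backups, as verified in the test above.
            .numMasters(3)
            .numRegionServers(1)
            .numDataNodes(1)
            .build();
        SingleProcessHBaseCluster cluster = htu.startMiniCluster(option);
        // ... exercise the cluster here ...
    } finally {
        // Always tear the mini cluster down, even when the body throws.
        htu.shutdownMiniCluster();
    }
}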

Example 7 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestRoundRobinAssignmentOnRestart method test.

/**
 * Tests that regions are round-robin assigned, rather than all retained on the
 * same server, when a region server restarts.
 */
@Test
public void test() throws Exception {
    UTIL.startMiniCluster(rsNum);
    // Turn off the balancer so it does not move regions mid-test
    UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
    LOG.info("\n\nCreating tables");
    for (TableName TABLE : TABLES) {
        UTIL.createMultiRegionTable(TABLE, FAMILY, regionNum);
    }
    // Wait until all regions are assigned
    for (TableName TABLE : TABLES) {
        UTIL.waitTableEnabled(TABLE);
    }
    UTIL.waitUntilNoRegionsInTransition(60000);
    SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> threads = cluster.getLiveRegionServerThreads();
    assertEquals(2, threads.size());
    ServerName testServer = threads.get(0).getRegionServer().getServerName();
    int port = testServer.getPort();
    List<RegionInfo> regionInfos = cluster.getMaster().getAssignmentManager().getRegionsOnServer(testServer);
    LOG.debug("RegionServer {} has {} regions", testServer, regionInfos.size());
    assertTrue(regionInfos.size() >= (TABLES.length * regionNum / rsNum));
    // Restart one region server, reusing its original port
    cluster.stopRegionServer(testServer);
    cluster.waitForRegionServerToStop(testServer, 60000);
    cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, port);
    cluster.startRegionServer();
    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    List<ServerName> localServers = master.getServerManager().getOnlineServersList();
    ServerName newTestServer = null;
    for (ServerName serverName : localServers) {
        if (serverName.getAddress().equals(testServer.getAddress())) {
            newTestServer = serverName;
            break;
        }
    }
    assertNotNull(newTestServer);
    // Wait until all regions are assigned
    for (TableName TABLE : TABLES) {
        UTIL.waitTableAvailable(TABLE);
    }
    UTIL.waitUntilNoRegionsInTransition(60000);
    List<RegionInfo> newRegionInfos = cluster.getMaster().getAssignmentManager().getRegionsOnServer(newTestServer);
    LOG.debug("RegionServer {} has {} regions", newTestServer, newRegionInfos.size());
    assertTrue("Should not retain all regions when restart", newRegionInfos.size() < regionInfos.size());
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Test(org.junit.Test)
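The stop-and-restart-on-the-same-port sequence in the middle of this test is the reusable piece. Here is a hedged helper distilled from it; the helper name is ours, but every call appears verbatim in the test above.

// Hypothetical helper: stop a region server and start a replacement pinned to
// the same port, so it comes back with the same address but a new start code.
private static void restartRegionServerOnSamePort(SingleProcessHBaseCluster cluster,
        ServerName server) throws Exception {
    int port = server.getPort();
    cluster.stopRegionServer(server);
    cluster.waitForRegionServerToStop(server, 60000);
    // Pin the next region server process to the old port before starting it.
    cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, port);
    cluster.startRegionServer();
}

After the restart, the server shows up under a new ServerName (same address, different start code), which is why the test re-resolves newTestServer by address.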

Example 8 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestReplicationSource method testServerShutdownRecoveredQueue.

/**
 * Tests that recovered queues are preserved on a regionserver shutdown.
 * See HBASE-18192
 */
@Test
public void testServerShutdownRecoveredQueue() throws Exception {
    try {
        // Ensure single-threaded WAL
        conf.set("hbase.wal.provider", "defaultProvider");
        conf.setInt("replication.sleep.before.failover", 2000);
        // Introduces a delay in regionserver shutdown to give the race condition a chance to kick in.
        conf.set(HConstants.REGION_SERVER_IMPL, ShutdownDelayRegionServer.class.getName());
        SingleProcessHBaseCluster cluster = TEST_UTIL.startMiniCluster(2);
        TEST_UTIL_PEER.startMiniCluster(1);
        HRegionServer serverA = cluster.getRegionServer(0);
        final ReplicationSourceManager managerA = serverA.getReplicationSourceService().getReplicationManager();
        HRegionServer serverB = cluster.getRegionServer(1);
        final ReplicationSourceManager managerB = serverB.getReplicationSourceService().getReplicationManager();
        final Admin admin = TEST_UTIL.getAdmin();
        final String peerId = "TestPeer";
        admin.addReplicationPeer(peerId, ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL_PEER.getClusterKey()).build());
        // Wait for replication sources on both servers to come up
        Waiter.waitFor(conf, 20000, (Waiter.Predicate<Exception>) () ->
            !(managerA.getSources().isEmpty() || managerB.getSources().isEmpty()));
        // Disabling the peer makes sure there is at least one log to claim when the
        // server dies. The recovered queue will also stay there while the peer is
        // disabled, even if the WALs it contains have no data.
        admin.disableReplicationPeer(peerId);
        // Stop serverA; its queues should be claimed by the only other live
        // server, i.e. serverB
        cluster.stopRegionServer(serverA.getServerName());
        Waiter.waitFor(conf, 20000, (Waiter.Predicate<Exception>) () ->
            managerB.getOldSources().size() == 1);
        final HRegionServer serverC = cluster.startRegionServer().getRegionServer();
        serverC.waitForServerOnline();
        Waiter.waitFor(conf, 20000, (Waiter.Predicate<Exception>) () ->
            serverC.getReplicationSourceService() != null);
        final ReplicationSourceManager managerC = ((Replication) serverC.getReplicationSourceService()).getReplicationManager();
        // Sanity check
        assertEquals(0, managerC.getOldSources().size());
        // Stop serverB. Now serverC should have two recovered queues:
        // 1. serverB's normal queue
        // 2. serverA's recovered queue, previously claimed by serverB
        cluster.stopRegionServer(serverB.getServerName());
        Waiter.waitFor(conf, 20000, (Waiter.Predicate<Exception>) () -> managerC.getOldSources().size() == 2);
        admin.enableReplicationPeer(peerId);
        Waiter.waitFor(conf, 20000, (Waiter.Predicate<Exception>) () -> managerC.getOldSources().size() == 0);
    } finally {
        conf.set(HConstants.REGION_SERVER_IMPL, HRegionServer.class.getName());
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Admin(org.apache.hadoop.hbase.client.Admin) Waiter(org.apache.hadoop.hbase.Waiter) IOException(java.io.IOException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)
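The wait-until-N-recovered-queues check recurs three times in this test. A hedged helper could factor it out; the name is ours, and ReplicationSourceManager and Waiter are used exactly as above (ReplicationSourceManager lives in org.apache.hadoop.hbase.replication.regionserver).

// Hypothetical helper: block until a ReplicationSourceManager reports the
// expected number of recovered ("old") replication queues, failing after the
// same 20 second bound the test uses.
private static void waitForRecoveredQueueCount(final Configuration conf,
        final ReplicationSourceManager manager, final int expected) {
    Waiter.waitFor(conf, 20000, (Waiter.Predicate<Exception>) () ->
        manager.getOldSources().size() == expected);
}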

Example 9 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestBlockReorderBlockLocation method testBlockLocation.

/**
 * Test that the reorder algo works as we expect.
 */
@Test
public void testBlockLocation() throws Exception {
    // We need to start HBase to get HConstants.HBASE_DIR set in conf
    htu.startMiniZKCluster();
    SingleProcessHBaseCluster hbm = htu.startMiniHBaseCluster();
    conf = hbm.getConfiguration();
    // The "/" is mandatory, without it we've got a null pointer exception on the namenode
    final String fileName = "/helloWorld";
    Path p = new Path(fileName);
    final int repCount = 3;
    Assert.assertTrue(cluster.getDataNodes().size() >= repCount);
    // Let's write the file
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();
    for (int i = 0; i < 10; i++) {
        // The interceptor is not set in this test, so we get the raw list at this point
        LocatedBlocks l;
        final long max = EnvironmentEdgeManager.currentTime() + 10000;
        do {
            l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
            Assert.assertNotNull(l.getLocatedBlocks());
            Assert.assertEquals(1, l.getLocatedBlocks().size());
            Assert.assertTrue("Expecting " + repCount + " , got " + l.get(0).getLocations().length, EnvironmentEdgeManager.currentTime() < max);
        } while (l.get(0).getLocations().length != repCount);
        // Should be filtered out: the name is not a WAL path => the order won't change
        Object[] originalList = l.getLocatedBlocks().toArray();
        HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks();
        lrb.reorderBlocks(conf, l, fileName);
        Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray());
        // Should be reordered, as we pretend to be a WAL file name with a compliant layout
        Assert.assertNotNull(conf.get(HConstants.HBASE_DIR));
        Assert.assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty());
        String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" + HConstants.HREGION_LOGDIR_NAME + "/" + host1 + ",6977,6576" + "/mylogfile";
        // Check that it will be possible to extract a ServerName from our construction
        Assert.assertNotNull("log= " + pseudoLogFile, AbstractFSWALProvider.getServerNameFromWALDirectoryName(dfs.getConf(), pseudoLogFile));
        // And check we're doing the right reorder.
        lrb.reorderBlocks(conf, l, pseudoLogFile);
        Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
        // Check again, it should remain the same.
        lrb.reorderBlocks(conf, l, pseudoLogFile);
        Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Path(org.apache.hadoop.fs.Path) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
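The pseudo-WAL path in this test works because a WAL directory name encodes the server that writes it. Below is a hedged sketch of that round trip, with an illustrative host, port, and start code; the helper name is ours.

// Hypothetical sketch: a WAL directory is named "<host>,<port>,<startcode>",
// and AbstractFSWALProvider parses that back into a ServerName. The reorder
// hook then moves that host's replica to the end of the location list, which
// is what the assertions on l.get(0).getLocations()[2] above verify.
private static ServerName parseWalWriter(Configuration conf) throws IOException {
    String walPath = conf.get(HConstants.HBASE_DIR) + "/"
        + HConstants.HREGION_LOGDIR_NAME
        + "/myhost.example.org,16020,1600000000000/mylogfile";
    return AbstractFSWALProvider.getServerNameFromWALDirectoryName(conf, walPath);
}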

Example 10 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestFlushWithThroughputController method getStoreWithName.

private HStore getStoreWithName(TableName tableName) {
    SingleProcessHBaseCluster cluster = hbtu.getMiniHBaseCluster();
    // Scan every region server for a region of the given table; return the
    // first store of the first matching region, or null if none is found.
    for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) {
        HRegionServer hrs = rst.getRegionServer();
        for (Region region : hrs.getRegions(tableName)) {
            return ((HRegion) region).getStores().iterator().next();
        }
    }
    return null;
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer)
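A hedged usage sketch for the helper above; the table name is illustrative, and getStorefilesSize() is assumed from the public HStore/Store API rather than taken from this test.

@Test
public void testStoreLookupSketch() throws Exception {
    // Hypothetical usage: the table must already exist and have regions assigned.
    HStore store = getStoreWithName(TableName.valueOf("testFlushControl"));
    assertNotNull(store);
    // For example, inspect how much data the store currently holds on disk.
    assertTrue(store.getStorefilesSize() >= 0);
}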

Aggregations

SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster)85 Test (org.junit.Test)69 HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer)31 TableName (org.apache.hadoop.hbase.TableName)26 Admin (org.apache.hadoop.hbase.client.Admin)24 Table (org.apache.hadoop.hbase.client.Table)22 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)22 HMaster (org.apache.hadoop.hbase.master.HMaster)21 ServerName (org.apache.hadoop.hbase.ServerName)18 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)18 HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil)14 MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost)13 IOException (java.io.IOException)12 Configuration (org.apache.hadoop.conf.Configuration)12 Put (org.apache.hadoop.hbase.client.Put)12 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)12 TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder)10 File (java.io.File)9 Path (org.apache.hadoop.fs.Path)9 RegionMoverBuilder (org.apache.hadoop.hbase.util.RegionMover.RegionMoverBuilder)9