
Example 56 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestMetaRegionReplicaReplication method testCatalogReplicaReplicationWithReplicaMoved.

@Test
public void testCatalogReplicaReplicationWithReplicaMoved() throws Exception {
    SingleProcessHBaseCluster cluster = HTU.getMiniHBaseCluster();
    HRegionServer hrs = cluster.getRegionServer(cluster.getServerHoldingMeta());
    HRegionServer hrsNoMetaReplica = null;
    HRegionServer server = null;
    Region metaReplica = null;
    boolean hostingMeta;
    for (int i = 0; i < cluster.getNumLiveRegionServers(); i++) {
        server = cluster.getRegionServer(i);
        hostingMeta = false;
        if (server == hrs) {
            continue;
        }
        for (Region region : server.getOnlineRegionsLocalContext()) {
            if (region.getRegionInfo().isMetaRegion()) {
                if (metaReplica == null) {
                    metaReplica = region;
                }
                hostingMeta = true;
                break;
            }
        }
        if (!hostingMeta) {
            hrsNoMetaReplica = server;
        }
    }
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
        Table table = connection.getTable(TableName.META_TABLE_NAME)) {
        // load the data to the table
        for (int i = 0; i < 5; i++) {
            LOG.info("Writing data from " + i * 1000 + " to " + (i * 1000 + 1000));
            HTU.loadNumericRows(table, HConstants.CATALOG_FAMILY, i * 1000, i * 1000 + 1000);
            if (i == 0) {
                HTU.moveRegionAndWait(metaReplica.getRegionInfo(), hrsNoMetaReplica.getServerName());
            }
        }
        verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, 0, 5000, HConstants.CATALOG_FAMILY);
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Table(org.apache.hadoop.hbase.client.Table) Connection(org.apache.hadoop.hbase.client.Connection) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)
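The test above only works against a cluster that hosts several replicas of hbase:meta. Below is a minimal sketch of the class-level setup it presumes; the field names HTU and numOfMetaReplica come from the example, while the replica count of 3 and the region server count are illustrative assumptions, not code taken from the project. Imports follow the packages listed above plus org.junit.BeforeClass and org.junit.AfterClass.

// Sketch of the assumed test-class setup; the numeric values are illustrative.
private static final HBaseTestingUtil HTU = new HBaseTestingUtil();
private static final int numOfMetaReplica = 3;

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // Ask the mini cluster to host extra hbase:meta replicas so that a
    // replica can be moved between region servers during the test.
    HTU.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, numOfMetaReplica);
    HTU.startMiniCluster(
        StartTestingClusterOption.builder().numRegionServers(3).build());
}

@AfterClass
public static void tearDownAfterClass() throws Exception {
    HTU.shutdownMiniCluster();
}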

Example 57 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestWALSplitWithDeletedTableData method testWALSplitWithDeletedTableData.

@Test
public void testWALSplitWithDeletedTableData() throws Exception {
    final byte[] CFNAME = Bytes.toBytes("f1");
    final byte[] QNAME = Bytes.toBytes("q1");
    final byte[] VALUE = Bytes.toBytes("v1");
    final TableName t1 = TableName.valueOf("t1");
    final TableName t2 = TableName.valueOf("t2");
    final byte[][] splitRows = { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") };
    TableDescriptorBuilder htdBuilder1 = TableDescriptorBuilder.newBuilder(t1);
    htdBuilder1.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CFNAME).build());
    Table tab1 = TEST_UTIL.createTable(htdBuilder1.build(), splitRows);
    TableDescriptorBuilder htdBuilder2 = TableDescriptorBuilder.newBuilder(t2);
    htdBuilder2.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CFNAME).build());
    Table tab2 = TEST_UTIL.createTable(htdBuilder2.build(), splitRows);
    List<Put> puts = new ArrayList<Put>(4);
    byte[][] rks = { Bytes.toBytes("ac"), Bytes.toBytes("ba"), Bytes.toBytes("ca"), Bytes.toBytes("dd") };
    for (byte[] rk : rks) {
        puts.add(new Put(rk).addColumn(CFNAME, QNAME, VALUE));
    }
    tab1.put(puts);
    tab2.put(puts);
    SingleProcessHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    TEST_UTIL.deleteTable(t1);
    Path tableDir = CommonFSUtils.getWALTableDir(TEST_UTIL.getConfiguration(), t1);
    // Dropping table 't1' removed the table directory from the WAL FS completely
    assertFalse(TEST_UTIL.getDFSCluster().getFileSystem().exists(tableDir));
    ServerName rs1 = cluster.getRegionServer(1).getServerName();
    // Kill one RS and wait for the WAL split and replay to finish.
    cluster.killRegionServer(rs1);
    cluster.waitForRegionServerToStop(rs1, 60 * 1000);
    assertEquals(1, cluster.getNumLiveRegionServers());
    Thread.sleep(1 * 1000);
    TEST_UTIL.waitUntilNoRegionsInTransition(60 * 1000);
    // Table 't1' is dropped. Assert table directory does not exist in WAL FS after WAL split.
    assertFalse(TEST_UTIL.getDFSCluster().getFileSystem().exists(tableDir));
    // Assert that table t2's region data was replayed after the WAL split and is available.
    for (byte[] rk : rks) {
        Result result = tab2.get(new Get(rk));
        assertFalse(result.isEmpty());
        Cell cell = result.getColumnLatestCell(CFNAME, QNAME);
        assertNotNull(cell);
        assertTrue(CellUtil.matchingValue(cell, VALUE));
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hbase.client.Table) ArrayList(java.util.ArrayList) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) Get(org.apache.hadoop.hbase.client.Get) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
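Note that the assertion assertEquals(1, cluster.getNumLiveRegionServers()) only holds if the surrounding class starts exactly two region servers. A minimal sketch of the class-level setup this implies is shown below; the TEST_UTIL field name is taken from the example, while the option values are assumptions rather than code from the project.

// Sketch of the assumed setup: two region servers, so killing one leaves one alive.
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

@BeforeClass
public static void setup() throws Exception {
    TEST_UTIL.startMiniCluster(
        StartTestingClusterOption.builder().numRegionServers(2).build());
}

@AfterClass
public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
}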

Example 58 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestMaster method testMoveRegionWhenNotInitialized.

@Test
public void testMoveRegionWhenNotInitialized() {
    SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();
    try {
        // fake it, set back later
        m.setInitialized(false);
        RegionInfo meta = RegionInfoBuilder.FIRST_META_REGIONINFO;
        m.move(meta.getEncodedNameAsBytes(), null);
        fail("Region should not be moved since master is not initialized");
    } catch (IOException ioe) {
        assertTrue(ioe instanceof PleaseHoldException);
    } finally {
        m.setInitialized(true);
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) PleaseHoldException(org.apache.hadoop.hbase.PleaseHoldException) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException) Test(org.junit.Test)
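The try/catch/fail pattern above can also be written with assertThrows from JUnit 4.13+, which keeps the call and the expected exception together. A sketch of that variant follows; the test logic is unchanged and only the assertion style differs (it additionally needs import static org.junit.Assert.assertThrows).

@Test
public void testMoveRegionWhenNotInitialized() {
    SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();
    try {
        // fake it, set back later
        m.setInitialized(false);
        RegionInfo meta = RegionInfoBuilder.FIRST_META_REGIONINFO;
        // assertThrows replaces the explicit try/catch plus fail() above.
        assertThrows(PleaseHoldException.class,
            () -> m.move(meta.getEncodedNameAsBytes(), null));
    } finally {
        m.setInitialized(true);
    }
}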

Example 59 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestMasterFailover method testSimpleMasterFailover.

/**
 * Simple test of master failover.
 * <p>
 * Starts with three masters.  Kills a backup master.  Then kills the active
 * master.  Ensures the final master becomes active and we can still contact
 * the cluster.
 */
@Test
public void testSimpleMasterFailover() throws Exception {
    final int NUM_MASTERS = 3;
    final int NUM_RS = 3;
    // Start the cluster
    HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
    try {
        StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
        TEST_UTIL.startMiniCluster(option);
        SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
        // get all the master threads
        List<MasterThread> masterThreads = cluster.getMasterThreads();
        // wait for each to come online
        for (MasterThread mt : masterThreads) {
            assertTrue(mt.isAlive());
        }
        // verify only one is the active master and we have right number
        int numActive = 0;
        int activeIndex = -1;
        ServerName activeName = null;
        HMaster active = null;
        for (int i = 0; i < masterThreads.size(); i++) {
            if (masterThreads.get(i).getMaster().isActiveMaster()) {
                numActive++;
                activeIndex = i;
                active = masterThreads.get(activeIndex).getMaster();
                activeName = active.getServerName();
            }
        }
        assertEquals(1, numActive);
        assertEquals(NUM_MASTERS, masterThreads.size());
        LOG.info("Active master " + activeName);
        // Check that ClusterStatus reports the correct active and backup masters
        assertNotNull(active);
        ClusterMetrics status = active.getClusterMetrics();
        assertEquals(activeName, status.getMasterName());
        assertEquals(2, status.getBackupMasterNames().size());
        // attempt to stop one of the inactive masters
        int backupIndex = (activeIndex == 0 ? 1 : activeIndex - 1);
        HMaster master = cluster.getMaster(backupIndex);
        LOG.debug("\n\nStopping a backup master: " + master.getServerName() + "\n");
        cluster.stopMaster(backupIndex, false);
        cluster.waitOnMaster(backupIndex);
        // Verify still one active master and it's the same
        for (int i = 0; i < masterThreads.size(); i++) {
            if (masterThreads.get(i).getMaster().isActiveMaster()) {
                assertEquals(activeName, masterThreads.get(i).getMaster().getServerName());
                activeIndex = i;
                active = masterThreads.get(activeIndex).getMaster();
            }
        }
        assertEquals(1, numActive);
        assertEquals(2, masterThreads.size());
        int rsCount = masterThreads.get(activeIndex).getMaster().getClusterMetrics().getLiveServerMetrics().size();
        LOG.info("Active master " + active.getServerName() + " managing " + rsCount + " regions servers");
        assertEquals(3, rsCount);
        // wait for the active master to acknowledge loss of the backup from ZK
        final HMaster activeFinal = active;
        TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), () -> activeFinal.getBackupMasters().size() == 1);
        // Check that ClusterStatus reports the correct active and backup masters
        assertNotNull(active);
        status = active.getClusterMetrics();
        assertEquals(activeName, status.getMasterName());
        assertEquals(1, status.getBackupMasterNames().size());
        // kill the active master
        LOG.debug("\n\nStopping the active master " + active.getServerName() + "\n");
        cluster.stopMaster(activeIndex, false);
        cluster.waitOnMaster(activeIndex);
        // wait for an active master to show up and be ready
        assertTrue(cluster.waitForActiveAndReadyMaster());
        LOG.debug("\n\nVerifying backup master is now active\n");
        // should only have one master now
        assertEquals(1, masterThreads.size());
        // and he should be active
        active = masterThreads.get(0).getMaster();
        assertNotNull(active);
        status = active.getClusterMetrics();
        ServerName masterName = status.getMasterName();
        assertNotNull(masterName);
        assertEquals(active.getServerName(), masterName);
        assertTrue(active.isActiveMaster());
        assertEquals(0, status.getBackupMasterNames().size());
        int rss = status.getLiveServerMetrics().size();
        LOG.info("Active master {} managing {} region servers", masterName.getServerName(), rss);
        assertEquals(3, rss);
    } finally {
        // Stop the cluster
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) ServerName(org.apache.hadoop.hbase.ServerName) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) Test(org.junit.Test)

Example 60 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestMasterFailover method testMetaInTransitionWhenMasterFailover.

/**
 * Test meta in transition when the master fails over.
 * This test used to manipulate region state up in ZK; that is no longer allowed in hbase2,
 * so that manipulation was removed, which makes the test somewhat anemic.
 */
@Test
public void testMetaInTransitionWhenMasterFailover() throws Exception {
    // Start the cluster
    HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
    TEST_UTIL.startMiniCluster();
    try {
        SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
        LOG.info("Cluster started");
        HMaster activeMaster = cluster.getMaster();
        ServerName metaServerName = cluster.getServerHoldingMeta();
        HRegionServer hrs = cluster.getRegionServer(metaServerName);
        // Now kill master, meta should remain on rs, where we placed it before.
        LOG.info("Aborting master");
        activeMaster.abort("test-kill");
        cluster.waitForMasterToStop(activeMaster.getServerName(), 30000);
        LOG.info("Master has aborted");
        // meta should remain where it was
        RegionState metaState = MetaTableLocator.getMetaRegionState(hrs.getZooKeeper());
        assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName);
        assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
        // Start up a new master
        LOG.info("Starting up a new master");
        activeMaster = cluster.startMaster().getMaster();
        LOG.info("Waiting for master to be ready");
        cluster.waitForActiveAndReadyMaster();
        LOG.info("Master is ready");
        // ensure meta is still deployed on RS
        metaState = MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper());
        assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName);
        assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
    // Done, shutdown the cluster
    } finally {
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) ServerName(org.apache.hadoop.hbase.ServerName) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)

Aggregations

SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 85 usages
Test (org.junit.Test): 69 usages
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 31 usages
TableName (org.apache.hadoop.hbase.TableName): 26 usages
Admin (org.apache.hadoop.hbase.client.Admin): 24 usages
Table (org.apache.hadoop.hbase.client.Table): 22 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 22 usages
HMaster (org.apache.hadoop.hbase.master.HMaster): 21 usages
ServerName (org.apache.hadoop.hbase.ServerName): 18 usages
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 18 usages
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil): 14 usages
MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost): 13 usages
IOException (java.io.IOException): 12 usages
Configuration (org.apache.hadoop.conf.Configuration): 12 usages
Put (org.apache.hadoop.hbase.client.Put): 12 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 12 usages
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 10 usages
File (java.io.File): 9 usages
Path (org.apache.hadoop.fs.Path): 9 usages
RegionMoverBuilder (org.apache.hadoop.hbase.util.RegionMover.RegionMoverBuilder): 9 usages
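Taken together, the aggregation counts reflect a recurring pattern in these tests: start a mini cluster, grab the SingleProcessHBaseCluster handle, drive it through Table and Admin calls, disturb a server, and verify the cluster recovers. Below is a minimal, self-contained sketch of that pattern; the class name, table name, family name, and option values are hypothetical and chosen only for illustration, and the imports match the types listed above plus the JUnit annotations and asserts.

public class SingleProcessHBaseClusterSketchTest {

    private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

    @BeforeClass
    public static void setUp() throws Exception {
        TEST_UTIL.startMiniCluster(
            StartTestingClusterOption.builder().numRegionServers(2).build());
    }

    @AfterClass
    public static void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster();
    }

    @Test
    public void testClusterServesReadsAfterRegionServerDies() throws Exception {
        SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
        byte[] family = Bytes.toBytes("f");
        try (Table table = TEST_UTIL.createTable(TableName.valueOf("sketch"), family)) {
            table.put(new Put(Bytes.toBytes("row"))
                .addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
            // Kill one region server and wait for its regions to be reassigned.
            ServerName victim = cluster.getRegionServer(1).getServerName();
            cluster.killRegionServer(victim);
            cluster.waitForRegionServerToStop(victim, 60 * 1000);
            TEST_UTIL.waitUntilNoRegionsInTransition(60 * 1000);
            // The row written before the failure must still be readable.
            assertFalse(table.get(new Get(Bytes.toBytes("row"))).isEmpty());
        }
    }
}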