Search in sources :

Example 76 with MiniHBaseCluster

use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.

In the class TestRestartCluster, the method testRetainAssignmentOnRestart:

/**
   * Tests that region assignments are retained across a full cluster restart:
   * after shutting everything down and restarting the master and region
   * servers on their previous ports, every region must come back on the same
   * host/port as before (but with a new server start code).
   */
@Test(timeout = 300000)
public void testRetainAssignmentOnRestart() throws Exception {
    UTIL.startMiniCluster(2);
    // Spin until the master reports fully initialized before driving it.
    while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
        Threads.sleep(1);
    }
    // Turn off balancer so placements stay put for the before/after comparison.
    UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
    LOG.info("\n\nCreating tables");
    for (TableName TABLE : TABLES) {
        UTIL.createTable(TABLE, FAMILY);
    }
    for (TableName TABLE : TABLES) {
        UTIL.waitTableEnabled(TABLE);
    }
    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    UTIL.waitUntilNoRegionsInTransition(120000);
    // We don't have to use SnapshotOfRegionAssignmentFromMeta.
    // We use it here because AM used to use it to load all user region placements
    SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
    snapshot.initialize();
    // Baseline: region -> server placement before the restart.
    Map<HRegionInfo, ServerName> regionToRegionServerMap = snapshot.getRegionToRegionServerMap();
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> threads = cluster.getLiveRegionServerThreads();
    assertEquals(2, threads.size());
    // Record three ports: both region servers plus the master — the check
    // below requires every region placement to be on one of these ports, so
    // regions may evidently be carried by the master process as well.
    int[] rsPorts = new int[3];
    for (int i = 0; i < 2; i++) {
        rsPorts[i] = threads.get(i).getRegionServer().getServerName().getPort();
    }
    rsPorts[2] = cluster.getMaster().getServerName().getPort();
    for (ServerName serverName : regionToRegionServerMap.values()) {
        // Test only, no need to optimize
        boolean found = false;
        for (int k = 0; k < 3 && !found; k++) {
            found = serverName.getPort() == rsPorts[k];
        }
        assertTrue(found);
    }
    LOG.info("\n\nShutting down HBase cluster");
    cluster.shutdown();
    cluster.waitUntilShutDown();
    LOG.info("\n\nSleeping a bit");
    Thread.sleep(2000);
    LOG.info("\n\nStarting cluster the second time with the same ports");
    try {
        // Make the master wait for more servers to check in before finishing
        // startup. NOTE(review): set to 4 while only 3 region servers are
        // started below — presumably the master counts itself (the online
        // server list is asserted to be 4 further down); confirm against
        // ServerManager semantics.
        cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 4);
        master = cluster.startMaster().getMaster();
        // Restart one server per recorded port so retained assignments can be
        // matched on host/port.
        for (int i = 0; i < 3; i++) {
            cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, rsPorts[i]);
            cluster.startRegionServer();
        }
    } finally {
        // Reset region server port so as not to conflict with other tests
        cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, 0);
        cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 2);
    }
    // Make sure live regionservers are on the same host/port
    List<ServerName> localServers = master.getServerManager().getOnlineServersList();
    assertEquals(4, localServers.size());
    for (int i = 0; i < 3; i++) {
        boolean found = false;
        for (ServerName serverName : localServers) {
            if (serverName.getPort() == rsPorts[i]) {
                found = true;
                break;
            }
        }
        assertTrue(found);
    }
    // Wait till master is initialized and all regions are assigned
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    // NOTE(review): +1 relative to the META snapshot — presumably one region
    // not captured by SnapshotOfRegionAssignmentFromMeta (a system region);
    // confirm.
    int expectedRegions = regionToRegionServerMap.size() + 1;
    while (!master.isInitialized() || regionStates.getRegionAssignments().size() != expectedRegions) {
        Threads.sleep(100);
    }
    // Re-snapshot placements after restart and compare against the baseline.
    snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
    snapshot.initialize();
    Map<HRegionInfo, ServerName> newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap();
    assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size());
    for (Map.Entry<HRegionInfo, ServerName> entry : newRegionToRegionServerMap.entrySet()) {
        // Skip the namespace table region — presumably exempt from retained
        // assignment; confirm.
        if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable()))
            continue;
        ServerName oldServer = regionToRegionServerMap.get(entry.getKey());
        ServerName currentServer = entry.getValue();
        // Same host/port, but a freshly started process => new start code.
        assertEquals(oldServer.getHostAndPort(), currentServer.getHostAndPort());
        assertNotEquals(oldServer.getStartcode(), currentServer.getStartcode());
    }
}
Also used : MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) Map(java.util.Map) Test(org.junit.Test)

Example 77 with MiniHBaseCluster

use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.

In the class TestSimpleRegionNormalizerOnCluster, the method testRegionNormalizationMergeOnCluster:

/**
 * Loads a 5-region table unevenly (two tiny regions among larger ones),
 * enables normalization, triggers it on the master, and verifies the two
 * small regions get merged, leaving 4 regions.
 */
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testRegionNormalizationMergeOnCluster() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    // Create a 5-region table and load it so the first two regions are small
    // relative to the rest, making them candidates for a normalization merge.
    try (Table table = TEST_UTIL.createMultiRegionTable(tableName, FAMILYNAME, 5)) {
        // Regions must be visited in a deterministic (key) order.
        List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
        Collections.sort(regions, Comparator.comparing(HRegion::getRegionInfo));
        final int[] loadSizes = { 1, 1, 3, 3, 5 };
        for (int i = 0; i < loadSizes.length; i++) {
            HRegion region = regions.get(i);
            generateTestData(region, loadSizes[i]);
            region.flush(true);
        }
    }
    // Turn normalization on for the table and persist the descriptor change.
    HTableDescriptor descriptor = admin.getTableDescriptor(tableName);
    descriptor.setNormalizationEnabled(true);
    admin.modifyTable(tableName, descriptor);
    admin.flush(tableName);
    assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName));
    // Let region load reports catch up before asking for normalization.
    Thread.sleep(5000);
    master.normalizeRegions();
    // Poll META until the merge has reduced the region count.
    while (MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName) > 4) {
        LOG.info("Waiting for normalization merge to complete");
        Thread.sleep(100);
    }
    assertEquals(4, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName));
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Table(org.apache.hadoop.hbase.client.Table) HMaster(org.apache.hadoop.hbase.master.HMaster) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)

Example 78 with MiniHBaseCluster

use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.

In the class TestNamespaceAuditor, the method testRegionMerge:

/**
 * Exercises merge/split behavior in a namespace with a region-count quota
 * (max 3 regions): merge two regions (3 -> 2), split back (2 -> 3), then
 * verify that a coprocessor-failed merge leaves the count unchanged and that
 * a further split (which would exceed the quota) is rejected.
 */
@Test
public void testRegionMerge() throws Exception {
    String nsp1 = prefix + "_regiontest";
    // Namespace quota: at most 3 regions and 2 tables.
    NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
    ADMIN.createNamespace(nspDesc);
    final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
    byte[] columnFamily = Bytes.toBytes("info");
    HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo);
    tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
    final int initialRegions = 3;
    // Pre-split into exactly the quota's worth of regions.
    ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions);
    Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    try (Table table = connection.getTable(tableTwo)) {
        UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999);
    }
    ADMIN.flush(tableTwo);
    List<HRegionInfo> hris = ADMIN.getTableRegions(tableTwo);
    Collections.sort(hris);
    // merge the two regions
    final Set<String> encodedRegionNamesToMerge = Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName());
    // NOTE(review): the returned future is ignored; completion is instead
    // polled via the waitFor predicate below.
    ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), false);
    // Wait until the merged-away regions are gone and all remaining regions
    // are OPEN.
    UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                    return false;
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public String explainFailure() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                    return hri + " which is expected to be merged is still online";
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return hri + " is still in not opened";
                }
            }
            return "Unknown";
        }
    });
    // After the merge there should be one region fewer.
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions - 1, hris.size());
    Collections.sort(hris);
    // Split one region back out (2 -> 3, still within the quota of 3).
    final HRegionInfo hriToSplit = hris.get(1);
    ADMIN.split(tableTwo, Bytes.toBytes("500"));
    // Wait until the parent region is gone and all daughters are OPEN.
    UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                    return false;
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public String explainFailure() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                    return hriToSplit + " which is expected to be split is still online";
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return hri + " is still in not opened";
                }
            }
            return "Unknown";
        }
    });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);
    // fail region merge through Coprocessor hook
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
    Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class.getName());
    CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
    masterObserver.failMerge(true);
    masterObserver.triggered = false;
    ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), false);
    masterObserver.waitUtilTriggered();
    // The observer failed the merge, so the region count must be unchanged.
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);
    // verify that we cannot split: a split here would exceed the namespace's
    // 3-region quota, so the count must stay the same.
    HRegionInfo hriToSplit2 = hris.get(1);
    ADMIN.split(tableTwo, TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true));
    // No completion event to wait on for a rejected split — sleep, then check
    // the count did not change.
    Thread.sleep(2000);
    assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size());
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) RestoreSnapshotException(org.apache.hadoop.hbase.snapshot.RestoreSnapshotException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) QuotaExceededException(org.apache.hadoop.hbase.quotas.QuotaExceededException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) RegionStates(org.apache.hadoop.hbase.master.RegionStates) Coprocessor(org.apache.hadoop.hbase.Coprocessor) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)

Example 79 with MiniHBaseCluster

use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.

In the class TestFSErrorsExposed, the method testFullSystemBubblesFSErrors:

/**
   * Cluster test which starts a region server with a region, then
   * removes the data from HDFS underneath it, and ensures that the resulting
   * filesystem errors are bubbled up to the client instead of being swallowed.
   */
@Test(timeout = 5 * 60 * 1000)
public void testFullSystemBubblesFSErrors() throws Exception {
    // We won't have an error if the datanode is not there if we use short circuit
    //  it's a known 'feature'.
    Assume.assumeTrue(!util.isReadShortCircuitOn());
    try {
        // Make it fail faster: single retry and short lease-recovery timeouts.
        util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
        util.getConfiguration().setInt("hbase.lease.recovery.timeout", 10000);
        util.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
        util.startMiniCluster(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        byte[] fam = Bytes.toBytes("fam");
        Admin admin = util.getAdmin();
        HTableDescriptor desc = new HTableDescriptor(tableName);
        // Block cache disabled so reads must go to HDFS and hit the failure.
        desc.addFamily(new HColumnDescriptor(fam).setMaxVersions(1).setBlockCacheEnabled(false));
        admin.createTable(desc);
        // NOTE(review): the original comment here appears truncated —
        // presumably: obtain a Table from a connection that carries the
        // retry configuration set above; else we use the old one w/ 10 as
        // default. Confirm against the connection/retry semantics.
        try (Table table = util.getConnection().getTable(tableName)) {
            // Load some data, flush it to HDFS, and sanity-check a read.
            util.loadTable(table, fam, false);
            util.flush();
            util.countRows(table);
            // Kill the DFS cluster: data under the region disappears.
            util.getDFSCluster().shutdownDataNodes();
            try {
                util.countRows(table);
                fail("Did not fail to count after removing data");
            } catch (Exception e) {
                // Expected: the read must surface an HDFS seek failure.
                LOG.info("Got expected error", e);
                assertTrue(e.getMessage().contains("Could not seek"));
            }
        }
        // Restart data nodes so that HBase can shut down cleanly.
        util.getDFSCluster().restartDataNodes();
    } finally {
        // Hard-kill any remaining server processes before the normal shutdown
        // so a wedged cluster cannot hang the test.
        MiniHBaseCluster cluster = util.getMiniHBaseCluster();
        if (cluster != null)
            cluster.killAll();
        util.shutdownMiniCluster();
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Admin(org.apache.hadoop.hbase.client.Admin) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)

Example 80 with MiniHBaseCluster

use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.

In the class TestJoinedScanners, the method testJoinedScanners:

/**
 * Loads a table with a small "essential" family plus a wide "joined" family,
 * then alternates slow (joined-scanner) and fast scans over it to exercise
 * both scan paths.
 *
 * <p>Fixes: the final batch flush used {@code puts.size() >= 0}, which is
 * always true and issued a pointless empty batch; the table was also closed
 * outside any finally block and would leak if a scan threw.
 */
@Test
public void testJoinedScanners() throws Exception {
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024 * 1024;
    htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 1);
    // Huge max file size so regions never split mid-test.
    htu.getConfiguration().setLong("hbase.hregion.max.filesize", 322122547200L);
    MiniHBaseCluster cluster = null;
    try {
        cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
        byte[][] families = { cf_essential, cf_joined };
        final TableName tableName = TableName.valueOf(name.getMethodName());
        HTableDescriptor desc = new HTableDescriptor(tableName);
        for (byte[] family : families) {
            HColumnDescriptor hcd = new HColumnDescriptor(family);
            hcd.setDataBlockEncoding(blockEncoding);
            desc.addFamily(hcd);
        }
        htu.getAdmin().createTable(desc);
        // try-with-resources so the table is closed even if a scan throws.
        try (Table ht = htu.getConnection().getTable(tableName)) {
            long rowsToInsert = 1000;
            int insertBatch = 20;
            long time = System.nanoTime();
            Random rand = new Random(time);
            LOG.info("Make " + Long.toString(rowsToInsert) + " rows, total size = " + Float.toString(rowsToInsert * valueWidth / 1024 / 1024) + " MB");
            byte[] valLarge = new byte[valueWidth];
            List<Put> puts = new ArrayList<>();
            for (long i = 0; i < rowsToInsert; i++) {
                Put put = new Put(Bytes.toBytes(Long.toString(i)));
                // Mark roughly selectionRatio% of rows as "essential" matches.
                if (rand.nextInt(100) <= selectionRatio) {
                    put.addColumn(cf_essential, col_name, flag_yes);
                } else {
                    put.addColumn(cf_essential, col_name, flag_no);
                }
                put.addColumn(cf_joined, col_name, valLarge);
                puts.add(put);
                if (puts.size() >= insertBatch) {
                    ht.put(puts);
                    puts.clear();
                }
            }
            // BUG FIX: was `puts.size() >= 0` (always true); flush only the
            // remaining unwritten puts.
            if (!puts.isEmpty()) {
                ht.put(puts);
                puts.clear();
            }
            LOG.info("Data generated in " + Double.toString((System.nanoTime() - time) / 1000000000.0) + " seconds");
            // Alternate slow (joined) and fast scan paths.
            boolean slow = true;
            for (int i = 0; i < 10; ++i) {
                runScanner(ht, slow);
                slow = !slow;
            }
        }
    } finally {
        if (cluster != null) {
            htu.shutdownMiniCluster();
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) Random(java.util.Random) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) Test(org.junit.Test)

Aggregations

MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster)86 Test (org.junit.Test)58 TableName (org.apache.hadoop.hbase.TableName)32 HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer)25 Table (org.apache.hadoop.hbase.client.Table)23 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)21 HMaster (org.apache.hadoop.hbase.master.HMaster)21 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)20 ServerName (org.apache.hadoop.hbase.ServerName)19 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)19 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)18 JVMClusterUtil (org.apache.hadoop.hbase.util.JVMClusterUtil)14 Admin (org.apache.hadoop.hbase.client.Admin)12 MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost)12 IOException (java.io.IOException)11 Region (org.apache.hadoop.hbase.regionserver.Region)11 HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility)10 Put (org.apache.hadoop.hbase.client.Put)8 TreeMap (java.util.TreeMap)7 Waiter (org.apache.hadoop.hbase.Waiter)7