
Example 21 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

From the class TestRegionServerAbortTimeout, method setUp().

@BeforeClass
public static void setUp() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // Will schedule an abort timeout task after SLEEP_TIME_WHEN_CLOSE_REGION ms
    conf.setLong(HRegionServer.ABORT_TIMEOUT, SLEEP_TIME_WHEN_CLOSE_REGION);
    conf.set(HRegionServer.ABORT_TIMEOUT_TASK, TestAbortTimeoutTask.class.getName());
    StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(2).build();
    UTIL.startMiniCluster(option);
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME)
        .setCoprocessor(SleepWhenCloseCoprocessor.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF).build())
        .build();
    UTIL.getAdmin().createTable(td, Bytes.toBytes("0"), Bytes.toBytes("9"), REGIONS_NUM);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), BeforeClass (org.junit.BeforeClass)
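
HRegionServer.ABORT_TIMEOUT_TASK names a class the region server runs when an abort has not completed within ABORT_TIMEOUT ms. The test's TestAbortTimeoutTask is not shown on this page; the following is a minimal sketch of such a task, assuming the configured class only needs to be a public Runnable with a no-argument constructor:

public static class ExampleAbortTimeoutTask implements Runnable {

    @Override
    public void run() {
        // Invoked only when the region server abort exceeds ABORT_TIMEOUT ms;
        // a real task might dump threads or force a JVM exit here.
        System.err.println("Region server abort timed out");
    }
}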

Example 22 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

From the class AbstractTestDLS, method startCluster().

private void startCluster(int numRS) throws Exception {
    SplitLogCounters.resetCounters();
    LOG.info("Starting cluster");
    conf.setLong("hbase.splitlog.max.resubmit", 0);
    // Make the failure test faster
    conf.setInt("zookeeper.recovery.retry", 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
    // no load balancing
    conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0);
    conf.setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 3);
    conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
    conf.set("hbase.wal.provider", getWalProvider());
    StartTestingClusterOption option = StartTestingClusterOption.builder()
        .numMasters(NUM_MASTERS).numRegionServers(numRS).build();
    TEST_UTIL.startMiniHBaseCluster(option);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    master = cluster.getMaster();
    TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return cluster.getLiveRegionServerThreads().size() >= numRS;
        }
    });
}
Also used: Waiter (org.apache.hadoop.hbase.Waiter), StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption), TimeoutException (java.util.concurrent.TimeoutException), IOException (java.io.IOException)
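
Waiter.Predicate declares a single evaluate method, so the anonymous class above can also be written as a lambda. An equivalent sketch:

    // Equivalent wait written as a lambda; behavior matches the anonymous
    // Waiter.Predicate above.
    TEST_UTIL.waitFor(120000, 200,
        () -> cluster.getLiveRegionServerThreads().size() >= numRS);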

Example 23 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

From the class SyncReplicationTestBase, method setUp().

@BeforeClass
public static void setUp() throws Exception {
    ZK_UTIL.startMiniZKCluster();
    initTestingUtility(UTIL1, "/cluster1");
    initTestingUtility(UTIL2, "/cluster2");
    StartTestingClusterOption option = StartTestingClusterOption.builder()
        .numMasters(2).numRegionServers(3).numDataNodes(3).build();
    UTIL1.startMiniCluster(option);
    UTIL2.startMiniCluster(option);
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF)
            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .build();
    UTIL1.getAdmin().createTable(td);
    UTIL2.getAdmin().createTable(td);
    FileSystem fs1 = UTIL1.getTestFileSystem();
    FileSystem fs2 = UTIL2.getTestFileSystem();
    REMOTE_WAL_DIR1 = new Path(
        UTIL1.getMiniHBaseCluster().getMaster().getMasterFileSystem().getWALRootDir(),
        "remoteWALs").makeQualified(fs1.getUri(), fs1.getWorkingDirectory());
    REMOTE_WAL_DIR2 = new Path(
        UTIL2.getMiniHBaseCluster().getMaster().getMasterFileSystem().getWALRootDir(),
        "remoteWALs").makeQualified(fs2.getUri(), fs2.getWorkingDirectory());
    UTIL1.getAdmin().addReplicationPeer(PEER_ID,
        ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey())
            .setReplicateAllUserTables(false)
            .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>()))
            .setRemoteWALDir(REMOTE_WAL_DIR2.toUri().toString()).build());
    UTIL2.getAdmin().addReplicationPeer(PEER_ID,
        ReplicationPeerConfig.newBuilder().setClusterKey(UTIL1.getClusterKey())
            .setReplicateAllUserTables(false)
            .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>()))
            .setRemoteWALDir(REMOTE_WAL_DIR1.toUri().toString()).build());
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem), StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), BeforeClass (org.junit.BeforeClass)
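
The initTestingUtility helper is not shown on this page. A hypothetical reconstruction, assuming HBaseTestingUtil exposes setZkCluster for sharing the mini ZooKeeper cluster (the actual helper lives in SyncReplicationTestBase):

private static void initTestingUtility(HBaseTestingUtil util, String zkParent) {
    // Hypothetical sketch: point the utility at the shared mini ZK cluster
    // and isolate it under its own znode parent.
    util.setZkCluster(ZK_UTIL.getZkCluster());
    util.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkParent);
}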

Example 24 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

From the class TestHFileOutputFormat2, method doIncrementalLoadTest().

private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, boolean putSortReducer, List<String> tableStr) throws Exception {
    util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(MultiTableHFileOutputFormat.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
    int hostCount = 1;
    int regionNum = 5;
    if (shouldKeepLocality) {
        // Raise the host count above the HDFS replica count once MiniHBaseCluster
        // supports an explicit hostnames parameter, as MiniDFSCluster already does.
        hostCount = 3;
        regionNum = 20;
    }
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
        hostnames[i] = "datanode_" + i;
    }
    StartTestingClusterOption option = StartTestingClusterOption.builder()
        .numRegionServers(hostCount).dataNodeHosts(hostnames).build();
    util.startMiniCluster(option);
    Map<String, Table> allTables = new HashMap<>(tableStr.size());
    List<HFileOutputFormat2.TableInfo> tableInfo = new ArrayList<>(tableStr.size());
    boolean writeMultipleTables = tableStr.size() > 1;
    for (String tableStrSingle : tableStr) {
        byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
        TableName tableName = TableName.valueOf(tableStrSingle);
        Table table = util.createTable(tableName, FAMILIES, splitKeys);
        RegionLocator r = util.getConnection().getRegionLocator(tableName);
        assertEquals("Should start with empty table", 0, util.countRows(table));
        int numRegions = r.getStartKeys().length;
        assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);
        allTables.put(tableStrSingle, table);
        tableInfo.add(new HFileOutputFormat2.TableInfo(table.getDescriptor(), r));
    }
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    // Generate the bulk load files
    runIncrementalPELoad(conf, tableInfo, testDir, putSortReducer);
    if (writeMultipleTables) {
        testDir = new Path(testDir, "default");
    }
    for (Table tableSingle : allTables.values()) {
        // This doesn't write into the table, just makes files
        assertEquals("HFOF should not touch actual table", 0, util.countRows(tableSingle));
    }
    int numTableDirs = 0;
    FileStatus[] fss = testDir.getFileSystem(conf).listStatus(testDir);
    for (FileStatus tf : fss) {
        Path tablePath = testDir;
        if (writeMultipleTables) {
            if (allTables.containsKey(tf.getPath().getName())) {
                ++numTableDirs;
                tablePath = tf.getPath();
            } else {
                continue;
            }
        }
        // Make sure that a directory was created for every CF
        int dir = 0;
        fss = tablePath.getFileSystem(conf).listStatus(tablePath);
        for (FileStatus f : fss) {
            for (byte[] family : FAMILIES) {
                if (Bytes.toString(family).equals(f.getPath().getName())) {
                    ++dir;
                }
            }
        }
        assertEquals("Column family not found in FS.", FAMILIES.length, dir);
    }
    if (writeMultipleTables) {
        assertEquals("Dir for all input tables not created", numTableDirs, allTables.size());
    }
    Admin admin = util.getConnection().getAdmin();
    try {
        // handle the split case
        if (shouldChangeRegions) {
            // Choose a semi-random table if multiple tables are available
            Table chosenTable = allTables.values().iterator().next();
            LOG.info("Changing regions in table " + chosenTable.getName().getNameAsString());
            admin.disableTable(chosenTable.getName());
            util.waitUntilNoRegionsInTransition();
            util.deleteTable(chosenTable.getName());
            byte[][] newSplitKeys = generateRandomSplitKeys(14);
            Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys);
            while (util.getConnection().getRegionLocator(chosenTable.getName())
                    .getAllRegionLocations().size() != 15
                || !admin.isTableAvailable(table.getName())) {
                Thread.sleep(200);
                LOG.info("Waiting for new region assignment to happen");
            }
        }
        // Perform the actual load
        for (HFileOutputFormat2.TableInfo singleTableInfo : tableInfo) {
            Path tableDir = testDir;
            String tableNameStr = singleTableInfo.getTableDescriptor().getTableName().getNameAsString();
            LOG.info("Running BulkLoadHFiles on table" + tableNameStr);
            if (writeMultipleTables) {
                tableDir = new Path(testDir, tableNameStr);
            }
            Table currentTable = allTables.get(tableNameStr);
            TableName currentTableName = currentTable.getName();
            BulkLoadHFiles.create(conf).bulkLoad(currentTableName, tableDir);
            // Ensure data shows up
            int expectedRows = 0;
            if (putSortReducer) {
                // no rows should be extracted
                assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, util.countRows(currentTable));
            } else {
                expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
                assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, util.countRows(currentTable));
                Scan scan = new Scan();
                ResultScanner results = currentTable.getScanner(scan);
                for (Result res : results) {
                    assertEquals(FAMILIES.length, res.rawCells().length);
                    Cell first = res.rawCells()[0];
                    for (Cell kv : res.rawCells()) {
                        assertTrue(CellUtil.matchingRows(first, kv));
                        assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
                    }
                }
                results.close();
            }
            String tableDigestBefore = util.checksumRows(currentTable);
            // Check region locality
            HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
            for (HRegion region : util.getHBaseCluster().getRegions(currentTableName)) {
                hbd.add(region.getHDFSBlocksDistribution());
            }
            for (String hostname : hostnames) {
                float locality = hbd.getBlockLocalityIndex(hostname);
                LOG.info("locality of [" + hostname + "]: " + locality);
                assertEquals(100, (int) (locality * 100));
            }
            // Cause regions to reopen
            admin.disableTable(currentTableName);
            while (!admin.isTableDisabled(currentTableName)) {
                Thread.sleep(200);
                LOG.info("Waiting for table to disable");
            }
            admin.enableTable(currentTableName);
            util.waitTableAvailable(currentTableName);
            assertEquals("Data should remain after reopening of regions", tableDigestBefore, util.checksumRows(currentTable));
        }
    } finally {
        for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) {
            tableInfoSingle.getRegionLocator().close();
        }
        for (Entry<String, Table> singleTable : allTables.entrySet()) {
            singleTable.getValue().close();
            util.deleteTable(singleTable.getValue().getName());
        }
        testDir.getFileSystem(conf).delete(testDir, true);
        util.shutdownMiniCluster();
    }
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), Result (org.apache.hadoop.hbase.client.Result), Cell (org.apache.hadoop.hbase.Cell), Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil), Admin (org.apache.hadoop.hbase.client.Admin), HDFSBlocksDistribution (org.apache.hadoop.hbase.HDFSBlocksDistribution), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Scan (org.apache.hadoop.hbase.client.Scan), StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption)
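
For orientation, a caller of this helper might look like the following; the test name and flag combination are illustrative, not a verbatim test from TestHFileOutputFormat2:

// Illustrative caller (requires java.util.Arrays); the real tests pass
// various combinations of the three boolean flags and one or more tables.
@Test
public void testMRIncrementalLoad() throws Exception {
    doIncrementalLoadTest(/* shouldChangeRegions */ false,
        /* shouldKeepLocality */ false, /* putSortReducer */ false,
        Arrays.asList("testMRIncrementalLoad"));
}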

Example 25 with StartTestingClusterOption

Use of org.apache.hadoop.hbase.StartTestingClusterOption in project hbase by apache.

From the class TestHFileOutputFormat2, method testMRIncrementalLoadWithLocalityMultiCluster().

@Test
public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception {
    // Start cluster A
    util = new HBaseTestingUtil();
    Configuration confA = util.getConfiguration();
    int hostCount = 3;
    int regionNum = 20;
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
        hostnames[i] = "datanode_" + i;
    }
    StartTestingClusterOption option = StartTestingClusterOption.builder()
        .numRegionServers(hostCount).dataNodeHosts(hostnames).build();
    util.startMiniCluster(option);
    // Start cluster B
    HBaseTestingUtil utilB = new HBaseTestingUtil();
    Configuration confB = utilB.getConfiguration();
    utilB.startMiniCluster(option);
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    TableName tableName = TableName.valueOf("table");
    // Create table in cluster B
    try (Table table = utilB.createTable(tableName, FAMILIES, splitKeys);
        RegionLocator r = utilB.getConnection().getRegionLocator(tableName)) {
        // Generate the bulk load files
        // Job has zookeeper configuration for cluster A
        // Assume reading from cluster A by TableInputFormat and creating hfiles to cluster B
        Job job = Job.getInstance(confA, "testLocalMRIncrementalLoad");
        Configuration jobConf = job.getConfiguration();
        final UUID key = ConfigurationCaptorConnection.configureConnectionImpl(jobConf);
        job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
        setupRandomGeneratorMapper(job, false);
        HFileOutputFormat2.configureIncrementalLoad(job, table, r);
        assertEquals(confB.get(HConstants.ZOOKEEPER_QUORUM),
            jobConf.get(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY));
        assertEquals(confB.get(HConstants.ZOOKEEPER_CLIENT_PORT),
            jobConf.get(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY));
        assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT),
            jobConf.get(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY));
        String bSpecificConfigKey = "my.override.config.for.b";
        String bSpecificConfigValue = "b-specific-value";
        jobConf.set(HFileOutputFormat2.REMOTE_CLUSTER_CONF_PREFIX + bSpecificConfigKey, bSpecificConfigValue);
        FileOutputFormat.setOutputPath(job, testDir);
        assertFalse(util.getTestFileSystem().exists(testDir));
        assertTrue(job.waitForCompletion(true));
        final List<Configuration> configs = ConfigurationCaptorConnection.getCapturedConfigarutions(key);
        assertFalse(configs.isEmpty());
        for (Configuration config : configs) {
            assertEquals(confB.get(HConstants.ZOOKEEPER_QUORUM), config.get(HConstants.ZOOKEEPER_QUORUM));
            assertEquals(confB.get(HConstants.ZOOKEEPER_CLIENT_PORT), config.get(HConstants.ZOOKEEPER_CLIENT_PORT));
            assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT), config.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
            assertEquals(bSpecificConfigValue, config.get(bSpecificConfigKey));
        }
    } finally {
        utilB.deleteTable(tableName);
        testDir.getFileSystem(confA).delete(testDir, true);
        util.shutdownMiniCluster();
        utilB.shutdownMiniCluster();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil), TableName (org.apache.hadoop.hbase.TableName), Job (org.apache.hadoop.mapreduce.Job), UUID (java.util.UUID), StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption), Test (org.junit.Test)
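
Taken together, Examples 21 through 25 exercise four options of the same builder. A composite sketch with illustrative values (each setter is optional, and omitted fields presumably fall back to single-process defaults):

    // Composite of the builder calls used in Examples 21-25; the values are
    // illustrative rather than taken from any single test.
    StartTestingClusterOption option = StartTestingClusterOption.builder()
        // Examples 22 and 23
        .numMasters(2)
        // used in all five examples
        .numRegionServers(3)
        // Example 23
        .numDataNodes(3)
        // Examples 24 and 25
        .dataNodeHosts(new String[] { "datanode_0", "datanode_1", "datanode_2" })
        .build();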

Aggregations

StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption): 42 usages
BeforeClass (org.junit.BeforeClass): 21 usages
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil): 13 usages
Test (org.junit.Test): 13 usages
Configuration (org.apache.hadoop.conf.Configuration): 10 usages
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 8 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 5 usages
TableName (org.apache.hadoop.hbase.TableName): 5 usages
Table (org.apache.hadoop.hbase.client.Table): 5 usages
MasterThread (org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread): 5 usages
ServerName (org.apache.hadoop.hbase.ServerName): 4 usages
RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 4 usages
Path (org.apache.hadoop.fs.Path): 3 usages
ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics): 3 usages
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 3 usages
Before (org.junit.Before): 3 usages
InetAddress (java.net.InetAddress): 2 usages
NetworkInterface (java.net.NetworkInterface): 2 usages
ArrayList (java.util.ArrayList): 2 usages
HashMap (java.util.HashMap): 2 usages