
Example 31 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestHFileOutputFormat2, method testBlockStoragePolicy.

@Test
public void testBlockStoragePolicy() throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD");
    conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(FAMILIES[0]), "ONE_SSD");
    Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0]));
    Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1]));
    util.startMiniDFSCluster(3);
    FileSystem fs = util.getDFSCluster().getFileSystem();
    try {
        fs.mkdirs(cf1Dir);
        fs.mkdirs(cf2Dir);
        // the original block storage policy would be HOT
        String spA = getStoragePolicyName(fs, cf1Dir);
        String spB = getStoragePolicyName(fs, cf2Dir);
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertEquals("HOT", spA);
        assertEquals("HOT", spB);
        // alter table cf schema to change storage policies
        HFileOutputFormat2.configureStoragePolicy(conf, fs, FAMILIES[0], cf1Dir);
        HFileOutputFormat2.configureStoragePolicy(conf, fs, FAMILIES[1], cf2Dir);
        spA = getStoragePolicyName(fs, cf1Dir);
        spB = getStoragePolicyName(fs, cf2Dir);
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertNotNull(spA);
        assertEquals("ONE_SSD", spA);
        assertNotNull(spB);
        assertEquals("ALL_SSD", spB);
    } finally {
        fs.delete(cf1Dir, true);
        fs.delete(cf2Dir, true);
        util.shutdownMiniDFSCluster();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
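The helper getStoragePolicyName used above is not part of this snippet. Below is a minimal sketch of such a helper, assuming Hadoop 2.8+ where FileSystem exposes getStoragePolicy(Path) returning an org.apache.hadoop.fs.BlockStoragePolicySpi; the method name matches the call site, but the body is an assumption rather than the test's actual implementation.

// Hypothetical sketch: resolve the storage policy name of a directory.
// Assumes Hadoop 2.8+ (FileSystem.getStoragePolicy(Path) is available).
private static String getStoragePolicyName(FileSystem fs, Path path) {
    try {
        BlockStoragePolicySpi policy = fs.getStoragePolicy(path);
        return policy == null ? null : policy.getName();
    } catch (IOException e) {
        // Policy could not be resolved; the assertions above would then fail visibly.
        return null;
    }
}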

Example 32 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestHFileOutputFormat2, method doIncrementalLoadTest.

private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, boolean putSortReducer, String tableStr) throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
    int hostCount = 1;
    int regionNum = 5;
    if (shouldKeepLocality) {
        // Once MiniHBaseCluster supports an explicit hostnames parameter (as MiniDFSCluster does),
        // the host count should be raised above the HDFS replica count.
        hostCount = 3;
        regionNum = 20;
    }
    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
        hostnames[i] = "datanode_" + i;
    }
    util.startMiniCluster(1, hostCount, hostnames);
    TableName tableName = TableName.valueOf(tableStr);
    Table table = util.createTable(tableName, FAMILIES, splitKeys);
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    FileSystem fs = testDir.getFileSystem(conf);
    try (RegionLocator r = util.getConnection().getRegionLocator(tableName);
        Admin admin = util.getConnection().getAdmin()) {
        assertEquals("Should start with empty table", 0, util.countRows(table));
        int numRegions = r.getStartKeys().length;
        assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);
        // Generate the bulk load files
        runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir, putSortReducer);
        // This doesn't write into the table, just makes files
        assertEquals("HFOF should not touch actual table", 0, util.countRows(table));
        // Make sure that a directory was created for every CF
        int dir = 0;
        for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
            for (byte[] family : FAMILIES) {
                if (Bytes.toString(family).equals(f.getPath().getName())) {
                    ++dir;
                }
            }
        }
        assertEquals("Column family not found in FS.", FAMILIES.length, dir);
        // handle the split case
        if (shouldChangeRegions) {
            LOG.info("Changing regions in table");
            admin.disableTable(table.getName());
            util.waitUntilNoRegionsInTransition();
            util.deleteTable(table.getName());
            byte[][] newSplitKeys = generateRandomSplitKeys(14);
            table = util.createTable(tableName, FAMILIES, newSplitKeys);
            while (util.getConnection().getRegionLocator(tableName).getAllRegionLocations().size() != 15 || !admin.isTableAvailable(table.getName())) {
                Thread.sleep(200);
                LOG.info("Waiting for new region assignment to happen");
            }
        }
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);
        // Ensure data shows up
        int expectedRows = 0;
        if (putSortReducer) {
            // no rows should be extracted
            assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, util.countRows(table));
        } else {
            expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
            assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, util.countRows(table));
            Scan scan = new Scan();
            ResultScanner results = table.getScanner(scan);
            for (Result res : results) {
                assertEquals(FAMILIES.length, res.rawCells().length);
                Cell first = res.rawCells()[0];
                for (Cell kv : res.rawCells()) {
                    assertTrue(CellUtil.matchingRow(first, kv));
                    assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
                }
            }
            results.close();
        }
        String tableDigestBefore = util.checksumRows(table);
        // Check region locality
        HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
        for (HRegion region : util.getHBaseCluster().getRegions(tableName)) {
            hbd.add(region.getHDFSBlocksDistribution());
        }
        for (String hostname : hostnames) {
            float locality = hbd.getBlockLocalityIndex(hostname);
            LOG.info("locality of [" + hostname + "]: " + locality);
            assertEquals(100, (int) (locality * 100));
        }
        // Cause regions to reopen
        admin.disableTable(tableName);
        while (!admin.isTableDisabled(tableName)) {
            Thread.sleep(200);
            LOG.info("Waiting for table to disable");
        }
        admin.enableTable(tableName);
        util.waitTableAvailable(tableName);
        assertEquals("Data should remain after reopening of regions", tableDigestBefore, util.checksumRows(table));
    } finally {
        testDir.getFileSystem(conf).delete(testDir, true);
        util.deleteTable(tableName);
        util.shutdownMiniCluster();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), FileStatus (org.apache.hadoop.fs.FileStatus), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), Admin (org.apache.hadoop.hbase.client.Admin), HDFSBlocksDistribution (org.apache.hadoop.hbase.HDFSBlocksDistribution), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell)
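The helper generateRandomSplitKeys is likewise referenced but not shown. A minimal sketch, assuming the split keys only need to be distinct random byte arrays of a fixed width; the 16-byte width and the use of java.util.Random are assumptions, not necessarily what the test does.

// Hypothetical sketch: produce `count` random split keys for createTable().
private byte[][] generateRandomSplitKeys(int count) {
    java.util.Random random = new java.util.Random();
    byte[][] keys = new byte[count][];
    for (int i = 0; i < count; i++) {
        // Fixed-width random keys; the width is arbitrary as long as keys stay distinct.
        keys[i] = new byte[16];
        random.nextBytes(keys[i]);
    }
    return keys;
}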

Example 33 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestLoadIncrementalHFilesSplitRecovery, method setupCluster.

@BeforeClass
public static void setupCluster() throws Exception {
    util = new HBaseTestingUtility();
    // Clear any globally configured region coprocessors so they do not interfere with this test.
    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
    util.startMiniCluster(1);
}
Also used: HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), BeforeClass (org.junit.BeforeClass)
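The matching teardown is not included in this excerpt. A minimal companion sketch, assuming the class follows the usual pattern of shutting the mini cluster down in an @AfterClass method (the method name is an assumption):

@AfterClass
public static void teardownCluster() throws Exception {
    // Stops the mini HBase cluster (and its embedded DFS/ZooKeeper) started in setupCluster().
    util.shutdownMiniCluster();
}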

Example 34 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestZKSecretWatcher, method setupBeforeClass.

@BeforeClass
public static void setupBeforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    Configuration conf = TEST_UTIL.getConfiguration();
    ZooKeeperWatcher zk = newZK(conf, "server1", new MockAbortable());
    AuthenticationTokenSecretManagerForTest[] tmp = new AuthenticationTokenSecretManagerForTest[2];
    tmp[0] = new AuthenticationTokenSecretManagerForTest(conf, zk, "server1", 60 * 60 * 1000, 60 * 1000);
    tmp[0].start();
    zk = newZK(conf, "server2", new MockAbortable());
    tmp[1] = new AuthenticationTokenSecretManagerForTest(conf, zk, "server2", 60 * 60 * 1000, 60 * 1000);
    tmp[1].start();
    // Poll until one of the two secret managers wins leader election and becomes the key master.
    while (KEY_MASTER == null) {
        for (int i = 0; i < 2; i++) {
            if (tmp[i].isMaster()) {
                KEY_MASTER = tmp[i];
                KEY_SLAVE = tmp[(i + 1) % 2];
                break;
            }
        }
        Thread.sleep(500);
    }
    LOG.info("Master is " + KEY_MASTER.getName() + ", slave is " + KEY_SLAVE.getName());
}
Also used: HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), Configuration (org.apache.hadoop.conf.Configuration), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), BeforeClass (org.junit.BeforeClass)
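Neither newZK nor MockAbortable appears in this excerpt. A rough sketch of what they could look like, assuming newZK simply builds a ZooKeeperWatcher over a copy of the configuration and MockAbortable is a trivial org.apache.hadoop.hbase.Abortable; both bodies are assumptions rather than the test's exact code.

// Hypothetical factory: a ZooKeeperWatcher identified by the given name.
private static ZooKeeperWatcher newZK(Configuration conf, String name, Abortable abortable) throws Exception {
    Configuration copy = HBaseConfiguration.create(conf);
    return new ZooKeeperWatcher(copy, name, abortable);
}

// Hypothetical Abortable that only records whether abort() was called.
private static class MockAbortable implements Abortable {
    private boolean aborted;

    @Override
    public void abort(String why, Throwable e) {
        aborted = true;
    }

    @Override
    public boolean isAborted() {
        return aborted;
    }
}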

Example 35 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestZKSecretWatcherRefreshKeys, method setupBeforeClass.

@BeforeClass
public static void setupBeforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
}
Also used: HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), BeforeClass (org.junit.BeforeClass)
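As in the previous setup examples, the cleanup side is omitted. A minimal companion sketch, assuming only the ZooKeeper mini cluster needs to be stopped (the method name is assumed):

@AfterClass
public static void tearDownAfterClass() throws Exception {
    // Only a mini ZooKeeper cluster was started, so only ZooKeeper is shut down.
    TEST_UTIL.shutdownMiniZKCluster();
}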

Aggregations

HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 136 uses
Configuration (org.apache.hadoop.conf.Configuration): 50 uses
BeforeClass (org.junit.BeforeClass): 49 uses
Test (org.junit.Test): 42 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 35 uses
Path (org.apache.hadoop.fs.Path): 29 uses
Admin (org.apache.hadoop.hbase.client.Admin): 24 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 22 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 20 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 18 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16 uses
Before (org.junit.Before): 14 uses
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 11 uses
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 11 uses
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 10 uses
Table (org.apache.hadoop.hbase.client.Table): 8 uses
HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem): 8 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 7 uses
Result (org.apache.hadoop.hbase.client.Result): 7 uses