Search in sources :

Example 6 with HBaseTestingUtil

use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

From the class CompactRandomRegionOfTableAction, the method perform:

@Override
public void perform() throws Exception {
    // Chaos action: pick one region of the target table at random and trigger
    // either a major or a minor compaction on it, depending on a weighted coin flip.
    HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
    Admin admin = util.getAdmin();
    // Roll a value in [0, 100); values below majorRatio select a major compaction.
    boolean useMajorCompaction = RandomUtils.nextInt(0, 100) < majorRatio;
    getLogger().info("Performing action: Compact random region of table " + tableName + ", major=" + useMajorCompaction);
    List<RegionInfo> candidateRegions = admin.getRegions(tableName);
    if (candidateRegions == null || candidateRegions.isEmpty()) {
        // Nothing to compact (table may have been disabled/dropped by other chaos).
        getLogger().info("Table " + tableName + " doesn't have regions to compact");
        return;
    }
    RegionInfo chosen = PolicyBasedChaosMonkey.selectRandomItem(candidateRegions.toArray(new RegionInfo[0]));
    try {
        if (useMajorCompaction) {
            getLogger().debug("Major compacting region " + chosen.getRegionNameAsString());
            admin.majorCompactRegion(chosen.getRegionName());
        } else {
            getLogger().debug("Compacting region " + chosen.getRegionNameAsString());
            admin.compactRegion(chosen.getRegionName());
        }
    } catch (Exception ex) {
        // Other concurrently-running chaos actions may race with us;
        // a failed compaction is tolerable here, so log and move on.
        getLogger().warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
    }
    if (sleepTime > 0) {
        Thread.sleep(sleepTime);
    }
}
Also used : RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Admin(org.apache.hadoop.hbase.client.Admin)

Example 7 with HBaseTestingUtil

use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

From the class TestHFileOutputFormat2, the method testBlockStoragePolicy:

@Test
public void testBlockStoragePolicy() throws Exception {
    // Exercises HFileOutputFormat2's storage-policy configuration: a per-family
    // override ("ONE_SSD" for FAMILIES[0]) should win over the table-wide default
    // ("ALL_SSD"), and untouched directories keep HDFS's default policy ("HOT").
    util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD");
    // Family-specific key: prefix + "<table name><suffix separator><family>".
    String cfSpecificKey = HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX
        + Bytes.toString(HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]));
    conf.set(cfSpecificKey, "ONE_SSD");
    Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0]));
    Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1]));
    util.startMiniDFSCluster(3);
    FileSystem fs = util.getDFSCluster().getFileSystem();
    try {
        fs.mkdirs(cf1Dir);
        fs.mkdirs(cf2Dir);
        // Freshly created directories inherit HDFS's default block storage policy, HOT.
        String policyA = getStoragePolicyName(fs, cf1Dir);
        String policyB = getStoragePolicyName(fs, cf2Dir);
        LOG.debug("Storage policy of cf 0: [" + policyA + "].");
        LOG.debug("Storage policy of cf 1: [" + policyB + "].");
        assertEquals("HOT", policyA);
        assertEquals("HOT", policyB);
        // Apply the configured policies to each family directory.
        HFileOutputFormat2.configureStoragePolicy(conf, fs, HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir);
        HFileOutputFormat2.configureStoragePolicy(conf, fs, HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir);
        policyA = getStoragePolicyName(fs, cf1Dir);
        policyB = getStoragePolicyName(fs, cf2Dir);
        LOG.debug("Storage policy of cf 0: [" + policyA + "].");
        LOG.debug("Storage policy of cf 1: [" + policyB + "].");
        // cf 0 has a family-specific override; cf 1 falls back to the table default.
        assertNotNull(policyA);
        assertEquals("ONE_SSD", policyA);
        assertNotNull(policyB);
        assertEquals("ALL_SSD", policyB);
    } finally {
        fs.delete(cf1Dir, true);
        fs.delete(cf2Dir, true);
        util.shutdownMiniDFSCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) TestHRegionFileSystem(org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Test(org.junit.Test)

Example 8 with HBaseTestingUtil

use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

From the class TestHBaseMRTestingUtility, the method testMRYarnConfigsPopulation:

@Test
public void testMRYarnConfigsPopulation() throws IOException {
    // Verifies that MR/YARN endpoint properties set on the testing utility's
    // Configuration are visible before the mini MR cluster starts, and that
    // starting the cluster then overwrites them with real endpoints.
    Map<String, String> dummyProps = new HashMap<>();
    dummyProps.put("mapreduce.jobtracker.address", "dummyhost:11234");
    dummyProps.put("yarn.resourcemanager.address", "dummyhost:11235");
    dummyProps.put("mapreduce.jobhistory.address", "dummyhost:11236");
    dummyProps.put("yarn.resourcemanager.scheduler.address", "dummyhost:11237");
    dummyProps.put("mapreduce.jobhistory.webapp.address", "dummyhost:11238");
    dummyProps.put("yarn.resourcemanager.webapp.address", "dummyhost:11239");
    HBaseTestingUtil hbt = new HBaseTestingUtil();
    // populate the mr props to the Configuration instance
    for (Map.Entry<String, String> entry : dummyProps.entrySet()) {
        hbt.getConfiguration().set(entry.getKey(), entry.getValue());
    }
    for (Map.Entry<String, String> entry : dummyProps.entrySet()) {
        // Compare with the known-non-null expected value on the left so an unset
        // property (Configuration.get returns null) fails the assertion cleanly
        // instead of throwing a NullPointerException.
        assertTrue("The Configuration for key " + entry.getKey() + " and value: " + entry.getValue() + " is not populated correctly", entry.getValue().equals(hbt.getConfiguration().get(entry.getKey())));
    }
    hbt.startMiniMapReduceCluster();
    try {
        // Confirm that MiniMapReduceCluster overwrites the mr properties and updates the Configuration
        for (Map.Entry<String, String> entry : dummyProps.entrySet()) {
            assertFalse("The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini" + "cluster is started", entry.getValue().equals(hbt.getConfiguration().get(entry.getKey())));
        }
    } finally {
        // Always tear the mini cluster down, even if an assertion above fails,
        // so a failing test does not leak a running MR cluster.
        hbt.shutdownMiniMapReduceCluster();
    }
}
Also used : HashMap(java.util.HashMap) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Map(java.util.Map) HashMap(java.util.HashMap) Test(org.junit.Test)

Example 9 with HBaseTestingUtil

use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

From the class SnapshotTableAction, the method perform:

@Override
public void perform() throws Exception {
    // Chaos action: snapshot the target table under a timestamped, unique name.
    HBaseTestingUtil testingUtil = context.getHBaseIntegrationTestingUtility();
    String snapName = tableName + "-it-" + EnvironmentEdgeManager.currentTime();
    Admin admin = testingUtil.getAdmin();
    // Don't try the snapshot if we're stopping
    if (context.isStopping()) {
        return;
    }
    getLogger().info("Performing action: Snapshot table {}", tableName);
    admin.snapshot(snapName, tableName);
    if (sleepTime > 0) {
        Thread.sleep(sleepTime);
    }
}
Also used : HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Admin(org.apache.hadoop.hbase.client.Admin)

Example 10 with HBaseTestingUtil

use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

From the class FlushTableAction, the method perform:

@Override
public void perform() throws Exception {
    // Chaos action: force a flush of the target table.
    HBaseTestingUtil testingUtil = context.getHBaseIntegrationTestingUtility();
    Admin admin = testingUtil.getAdmin();
    // Don't try the flush if we're stopping
    if (context.isStopping()) {
        return;
    }
    getLogger().info("Performing action: Flush table " + tableName);
    try {
        admin.flush(tableName);
    } catch (Exception ex) {
        // Concurrently-running chaos actions can legitimately make the flush
        // fail, so log a warning rather than propagating.
        getLogger().warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
    }
    if (sleepTime > 0) {
        Thread.sleep(sleepTime);
    }
}
Also used : HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Admin(org.apache.hadoop.hbase.client.Admin)

Aggregations

HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil)144 Configuration (org.apache.hadoop.conf.Configuration)42 Test (org.junit.Test)42 Before (org.junit.Before)41 BeforeClass (org.junit.BeforeClass)37 Path (org.apache.hadoop.fs.Path)24 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)22 Admin (org.apache.hadoop.hbase.client.Admin)22 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)15 StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption)14 FileSystem (org.apache.hadoop.fs.FileSystem)13 MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster)12 TableName (org.apache.hadoop.hbase.TableName)10 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)10 SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster)9 ServerName (org.apache.hadoop.hbase.ServerName)8 Table (org.apache.hadoop.hbase.client.Table)8 ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher)8 IOException (java.io.IOException)7 ArrayList (java.util.ArrayList)7