Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class CompactRandomRegionOfTableAction, method perform:
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  boolean major = RandomUtils.nextInt(0, 100) < majorRatio;
  getLogger().info(
    "Performing action: Compact random region of table " + tableName + ", major=" + major);
  List<RegionInfo> regions = admin.getRegions(tableName);
  if (regions == null || regions.isEmpty()) {
    getLogger().info("Table " + tableName + " doesn't have regions to compact");
    return;
  }
  RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(regions.toArray(new RegionInfo[0]));
  try {
    if (major) {
      getLogger().debug("Major compacting region " + region.getRegionNameAsString());
      admin.majorCompactRegion(region.getRegionName());
    } else {
      getLogger().debug("Compacting region " + region.getRegionNameAsString());
      admin.compactRegion(region.getRegionName());
    }
  } catch (Exception ex) {
    getLogger().warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
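The action above depends on the chaos-monkey context, but the underlying Admin calls stand on their own. Below is a minimal standalone sketch, assuming a reachable cluster; the class name, the table name "test_table", and the 10% major-compaction ratio are illustrative placeholders, not taken from the HBase source.

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class CompactRandomRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "test_table" is a placeholder; substitute an existing table.
    TableName tableName = TableName.valueOf("test_table");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(tableName);
      if (regions.isEmpty()) {
        return;
      }
      // Pick one region at random, mirroring PolicyBasedChaosMonkey.selectRandomItem.
      RegionInfo region = regions.get(ThreadLocalRandom.current().nextInt(regions.size()));
      // Roughly 10% of calls request a major compaction (illustrative ratio).
      if (ThreadLocalRandom.current().nextInt(100) < 10) {
        admin.majorCompactRegion(region.getRegionName());
      } else {
        admin.compactRegion(region.getRegionName());
      }
    }
  }
}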
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class TestHFileOutputFormat2, method testBlockStoragePolicy:
@Test
public void testBlockStoragePolicy() throws Exception {
  util = new HBaseTestingUtil();
  Configuration conf = util.getConfiguration();
  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD");
  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX
    + Bytes.toString(HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0])),
    "ONE_SSD");
  Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0]));
  Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1]));
  util.startMiniDFSCluster(3);
  FileSystem fs = util.getDFSCluster().getFileSystem();
  try {
    fs.mkdirs(cf1Dir);
    fs.mkdirs(cf2Dir);
    // the original block storage policy would be HOT
    String spA = getStoragePolicyName(fs, cf1Dir);
    String spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);
    // alter table cf schema to change storage policies
    HFileOutputFormat2.configureStoragePolicy(conf, fs,
      HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir);
    HFileOutputFormat2.configureStoragePolicy(conf, fs,
      HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir);
    spA = getStoragePolicyName(fs, cf1Dir);
    spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertNotNull(spA);
    assertEquals("ONE_SSD", spA);
    assertNotNull(spB);
    assertEquals("ALL_SSD", spB);
  } finally {
    fs.delete(cf1Dir, true);
    fs.delete(cf2Dir, true);
    util.shutdownMiniDFSCluster();
  }
}
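HFileOutputFormat2.configureStoragePolicy ultimately delegates to the HDFS storage-policy API. The following is a minimal sketch of those underlying calls, assuming an HDFS-backed FileSystem (the local filesystem does not support storage policies) and a Hadoop version (2.8+) where FileSystem exposes getStoragePolicy/setStoragePolicy; the class name and path are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StoragePolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster (or a MiniDFSCluster).
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/tmp/storage-policy-demo"); // placeholder path
    fs.mkdirs(dir);
    // A newly created directory carries the default policy, HOT.
    System.out.println("before: " + fs.getStoragePolicy(dir).getName());
    // Request one-SSD block placement for everything under the directory.
    fs.setStoragePolicy(dir, "ONE_SSD");
    System.out.println("after: " + fs.getStoragePolicy(dir).getName());
    fs.delete(dir, true);
  }
}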
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class TestHBaseMRTestingUtility, method testMRYarnConfigsPopulation:
@Test
public void testMRYarnConfigsPopulation() throws IOException {
  Map<String, String> dummyProps = new HashMap<>();
  dummyProps.put("mapreduce.jobtracker.address", "dummyhost:11234");
  dummyProps.put("yarn.resourcemanager.address", "dummyhost:11235");
  dummyProps.put("mapreduce.jobhistory.address", "dummyhost:11236");
  dummyProps.put("yarn.resourcemanager.scheduler.address", "dummyhost:11237");
  dummyProps.put("mapreduce.jobhistory.webapp.address", "dummyhost:11238");
  dummyProps.put("yarn.resourcemanager.webapp.address", "dummyhost:11239");
  HBaseTestingUtil hbt = new HBaseTestingUtil();
  // populate the mr props to the Configuration instance
  for (Map.Entry<String, String> entry : dummyProps.entrySet()) {
    hbt.getConfiguration().set(entry.getKey(), entry.getValue());
  }
  for (Map.Entry<String, String> entry : dummyProps.entrySet()) {
    assertTrue("The Configuration for key " + entry.getKey() + " and value: " + entry.getValue()
      + " is not populated correctly",
      hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue()));
  }
  hbt.startMiniMapReduceCluster();
  // Confirm that MiniMapReduceCluster overwrites the mr properties and updates the Configuration
  for (Map.Entry<String, String> entry : dummyProps.entrySet()) {
    assertFalse("The MR prop: " + entry.getValue()
      + " is not overwritten when map reduce minicluster is started",
      hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue()));
  }
  hbt.shutdownMiniMapReduceCluster();
}
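The point of this test is that startMiniMapReduceCluster rewrites the MR/YARN addresses in the shared Configuration so that subsequent jobs target the in-process cluster. Below is a minimal sketch of that lifecycle using only the calls already shown above; the class name is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniMRLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil hbt = new HBaseTestingUtil();
    Configuration conf = hbt.getConfiguration();
    // Whatever was configured (or null) before the mini cluster starts.
    System.out.println("before: " + conf.get("yarn.resourcemanager.address"));
    hbt.startMiniMapReduceCluster();
    // The address now points at the in-process resource manager.
    System.out.println("after: " + conf.get("yarn.resourcemanager.address"));
    hbt.shutdownMiniMapReduceCluster();
  }
}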
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class SnapshotTableAction, method perform:
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  String snapshotName = tableName + "-it-" + EnvironmentEdgeManager.currentTime();
  Admin admin = util.getAdmin();
  // Don't try the snapshot if we're stopping
  if (context.isStopping()) {
    return;
  }
  getLogger().info("Performing action: Snapshot table {}", tableName);
  admin.snapshot(snapshotName, tableName);
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
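For reference, the same snapshot can be taken outside the chaos framework with a plain client connection. A minimal sketch follows, where the class name and table "test_table" are placeholders; the snapshot is deleted at the end so repeated runs don't collide on the name.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("test_table"); // placeholder table
    String snapshotName = tableName + "-demo-" + System.currentTimeMillis();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Take the snapshot; the call blocks until it completes or fails.
      admin.snapshot(snapshotName, tableName);
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      snapshots.forEach(s -> System.out.println("snapshot: " + s.getName()));
      // Clean up so the name can be reused next run.
      admin.deleteSnapshot(snapshotName);
    }
  }
}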
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class FlushTableAction, method perform:
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  // Don't try the flush if we're stopping
  if (context.isStopping()) {
    return;
  }
  getLogger().info("Performing action: Flush table " + tableName);
  try {
    admin.flush(tableName);
  } catch (Exception ex) {
    getLogger().warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
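A flush forces a table's in-memory writes (memstores) out to HFiles on disk, which is why chaos testing triggers it at random points. A minimal standalone sketch of the same Admin.flush call follows, assuming an existing table; the class name, table "test_table", and family "cf" are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder table and column family; both must already exist.
    TableName tableName = TableName.valueOf("test_table");
    byte[] family = Bytes.toBytes("cf");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(tableName);
         Admin admin = connection.getAdmin()) {
      // Write one cell; it lands in the region's memstore first.
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
      // Force the memstore contents out to an HFile on disk.
      admin.flush(tableName);
    }
  }
}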