Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestHFileOutputFormat2, method testBlockStoragePolicy.
@Test
public void testBlockStoragePolicy() throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD");
  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(FAMILIES[0]),
    "ONE_SSD");
  Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0]));
  Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1]));
  util.startMiniDFSCluster(3);
  FileSystem fs = util.getDFSCluster().getFileSystem();
  try {
    fs.mkdirs(cf1Dir);
    fs.mkdirs(cf2Dir);
    // the original block storage policy would be HOT
    String spA = getStoragePolicyName(fs, cf1Dir);
    String spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);
    // alter table cf schema to change storage policies
    HFileOutputFormat2.configureStoragePolicy(conf, fs, FAMILIES[0], cf1Dir);
    HFileOutputFormat2.configureStoragePolicy(conf, fs, FAMILIES[1], cf2Dir);
    spA = getStoragePolicyName(fs, cf1Dir);
    spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertNotNull(spA);
    assertEquals("ONE_SSD", spA);
    assertNotNull(spB);
    assertEquals("ALL_SSD", spB);
  } finally {
    fs.delete(cf1Dir, true);
    fs.delete(cf2Dir, true);
    util.shutdownMiniDFSCluster();
  }
}
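The snippet does not show the getStoragePolicyName helper it calls. Below is a minimal sketch of what such a helper could look like, assuming Hadoop's FileSystem.getStoragePolicy API (available since Hadoop 2.8); the real test may instead go through DistributedFileSystem or use reflection for older Hadoop versions.

// Hypothetical helper, not taken from the snippet above: resolves the storage policy
// name of a path, returning null if the policy cannot be determined.
private String getStoragePolicyName(FileSystem fs, Path path) {
  try {
    BlockStoragePolicySpi policy = fs.getStoragePolicy(path);
    return policy == null ? null : policy.getName();
  } catch (IOException e) {
    LOG.warn("Failed to resolve storage policy of " + path, e);
    return null;
  }
}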
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestHFileOutputFormat2, method doIncrementalLoadTest.
private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality,
    boolean putSortReducer, String tableStr) throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
  int hostCount = 1;
  int regionNum = 5;
  if (shouldKeepLocality) {
    // We should change host count higher than hdfs replica count when MiniHBaseCluster supports
    // explicit hostnames parameter just like MiniDFSCluster does.
    hostCount = 3;
    regionNum = 20;
  }
  byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
  String[] hostnames = new String[hostCount];
  for (int i = 0; i < hostCount; ++i) {
    hostnames[i] = "datanode_" + i;
  }
  util.startMiniCluster(1, hostCount, hostnames);

  TableName tableName = TableName.valueOf(tableStr);
  Table table = util.createTable(tableName, FAMILIES, splitKeys);
  Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
  FileSystem fs = testDir.getFileSystem(conf);
  try (RegionLocator r = util.getConnection().getRegionLocator(tableName);
      Admin admin = util.getConnection().getAdmin()) {
    assertEquals("Should start with empty table", 0, util.countRows(table));
    int numRegions = r.getStartKeys().length;
    assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

    // Generate the bulk load files
    runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir, putSortReducer);
    // This doesn't write into the table, just makes files
    assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

    // Make sure that a directory was created for every CF
    int dir = 0;
    for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
      for (byte[] family : FAMILIES) {
        if (Bytes.toString(family).equals(f.getPath().getName())) {
          ++dir;
        }
      }
    }
    assertEquals("Column family not found in FS.", FAMILIES.length, dir);

    // handle the split case
    if (shouldChangeRegions) {
      LOG.info("Changing regions in table");
      admin.disableTable(table.getName());
      util.waitUntilNoRegionsInTransition();
      util.deleteTable(table.getName());
      byte[][] newSplitKeys = generateRandomSplitKeys(14);
      table = util.createTable(tableName, FAMILIES, newSplitKeys);
      while (util.getConnection().getRegionLocator(tableName).getAllRegionLocations().size() != 15
          || !admin.isTableAvailable(table.getName())) {
        Thread.sleep(200);
        LOG.info("Waiting for new region assignment to happen");
      }
    }

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);

    // Ensure data shows up
    int expectedRows = 0;
    if (putSortReducer) {
      // no rows should be extracted
      assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
        util.countRows(table));
    } else {
      expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
        util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
    }
    String tableDigestBefore = util.checksumRows(table);

    // Check region locality
    HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
    for (HRegion region : util.getHBaseCluster().getRegions(tableName)) {
      hbd.add(region.getHDFSBlocksDistribution());
    }
    for (String hostname : hostnames) {
      float locality = hbd.getBlockLocalityIndex(hostname);
      LOG.info("locality of [" + hostname + "]: " + locality);
      assertEquals(100, (int) (locality * 100));
    }

    // Cause regions to reopen
    admin.disableTable(tableName);
    while (!admin.isTableDisabled(tableName)) {
      Thread.sleep(200);
      LOG.info("Waiting for table to disable");
    }
    admin.enableTable(tableName);
    util.waitTableAvailable(tableName);
    assertEquals("Data should remain after reopening of regions", tableDigestBefore,
      util.checksumRows(table));
  } finally {
    testDir.getFileSystem(conf).delete(testDir, true);
    util.deleteTable(tableName);
    util.shutdownMiniCluster();
  }
}
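Since doIncrementalLoadTest is private, the class presumably drives it through small @Test wrappers. The wrappers below are illustrative only: the method names and table names are assumptions about how the flags might be exercised, not code taken from the project.

@Test
public void testMRIncrementalLoad() throws Exception {
  // Plain round trip: generate HFiles with MapReduce, then bulk load them.
  doIncrementalLoadTest(false, false, false, "testMRIncrementalLoad");
}

@Test
public void testMRIncrementalLoadWithSplit() throws Exception {
  // Recreate the table with different split keys between HFile generation and the bulk load.
  doIncrementalLoadTest(true, false, false, "testMRIncrementalLoadWithSplit");
}

@Test
public void testMRIncrementalLoadWithLocality() throws Exception {
  // Start more hosts so HDFS block locality can be asserted after the load.
  doIncrementalLoadTest(false, true, false, "testMRIncrementalLoadWithLocality");
}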
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestLoadIncrementalHFilesSplitRecovery, method setupCluster.
@BeforeClass
public static void setupCluster() throws Exception {
  util = new HBaseTestingUtility();
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
  util.startMiniCluster(1);
}
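A matching @AfterClass hook is not shown here; presumably the mini cluster is torn down once the class finishes. A minimal sketch of that counterpart, assuming the usual pairing with shutdownMiniCluster:

@AfterClass
public static void teardownCluster() throws Exception {
  // Shut down the mini cluster started in setupCluster().
  util.shutdownMiniCluster();
}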
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestZKSecretWatcher, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  ZooKeeperWatcher zk = newZK(conf, "server1", new MockAbortable());
  AuthenticationTokenSecretManagerForTest[] tmp = new AuthenticationTokenSecretManagerForTest[2];
  tmp[0] = new AuthenticationTokenSecretManagerForTest(conf, zk, "server1", 60 * 60 * 1000, 60 * 1000);
  tmp[0].start();
  zk = newZK(conf, "server2", new MockAbortable());
  tmp[1] = new AuthenticationTokenSecretManagerForTest(conf, zk, "server2", 60 * 60 * 1000, 60 * 1000);
  tmp[1].start();
  while (KEY_MASTER == null) {
    for (int i = 0; i < 2; i++) {
      if (tmp[i].isMaster()) {
        KEY_MASTER = tmp[i];
        KEY_SLAVE = tmp[(i + 1) % 2];
        break;
      }
    }
    Thread.sleep(500);
  }
  LOG.info("Master is " + KEY_MASTER.getName() + ", slave is " + KEY_SLAVE.getName());
}
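Neither the newZK helper nor MockAbortable appears in the snippet. The sketch below is a plausible reconstruction, assuming each secret manager gets its own ZooKeeperWatcher over a copy of the configuration and that the abortable merely records the abort; the project's actual implementations may differ.

// Hypothetical reconstructions of the helpers referenced above.
private static class MockAbortable implements Abortable {
  private boolean aborted;

  @Override
  public void abort(String why, Throwable e) {
    LOG.info("Aborting: " + why, e);
    aborted = true;
  }

  @Override
  public boolean isAborted() {
    return aborted;
  }
}

private static ZooKeeperWatcher newZK(Configuration conf, String name, Abortable abort)
    throws Exception {
  // A separate watcher per "server" keeps the two secret managers independent.
  Configuration copy = HBaseConfiguration.create(conf);
  return new ZooKeeperWatcher(copy, name, abort);
}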
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestZKSecretWatcherRefreshKeys, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
}
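Only the mini ZooKeeper cluster is started here; a corresponding @AfterClass hook would presumably stop it. A minimal sketch, assuming the standard shutdownMiniZKCluster counterpart:

@AfterClass
public static void tearDown() throws Exception {
  // Stop the mini ZooKeeper cluster started in setupBeforeClass().
  TEST_UTIL.shutdownMiniZKCluster();
}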