Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
In the class TestBucketCache, method testRetrieveFromFile:
@Test
public void testRetrieveFromFile() throws Exception {
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Path testDir = TEST_UTIL.getDataTestDir();
  TEST_UTIL.getTestFileSystem().mkdirs(testDir);
  BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
      constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
      testDir + "/bucket.persistence");
  long usedSize = bucketCache.getAllocator().getUsedSize();
  assertTrue(usedSize == 0);
  HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1);
  // Add blocks
  for (HFileBlockPair block : blocks) {
    bucketCache.cacheBlock(block.getBlockName(), block.getBlock());
  }
  for (HFileBlockPair block : blocks) {
    cacheAndWaitUntilFlushedToBucket(bucketCache, block.getBlockName(), block.getBlock());
  }
  usedSize = bucketCache.getAllocator().getUsedSize();
  assertTrue(usedSize != 0);
  // persist cache to file
  bucketCache.shutdown();
  // restore cache from file
  bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
      constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
      testDir + "/bucket.persistence");
  assertEquals(usedSize, bucketCache.getAllocator().getUsedSize());
  // persist cache to file
  bucketCache.shutdown();
  // Reconfigure the bucket sizes so that the biggest bucket is smaller than
  // constructedBlockSize (8k or 16k); the cache can then no longer be restored from the file.
  int[] smallBucketSizes = new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024 };
  bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
      constructedBlockSize, smallBucketSizes, writeThreads, writerQLen,
      testDir + "/bucket.persistence");
  assertEquals(0, bucketCache.getAllocator().getUsedSize());
  assertEquals(0, bucketCache.backingMap.size());
  TEST_UTIL.cleanupTestDir();
}
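The test relies on a cacheAndWaitUntilFlushedToBucket helper that is not shown above. A minimal sketch of such a helper, assuming a block only appears in backingMap once a writer thread has flushed it to the IOEngine (the project's actual implementation may differ):

private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey,
    Cacheable block) throws InterruptedException {
  // Queue the block for caching, then poll until the asynchronous writer has placed it in a bucket.
  cache.cacheBlock(cacheKey, block);
  while (!cache.backingMap.containsKey(cacheKey)) {
    Thread.sleep(100);
  }
}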
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
In the class TestSecureLoadIncrementalHFilesSplitRecovery, method setupCluster:
// This "overrides" the parent's static method; make sure the two stay in sync.
@BeforeClass
public static void setupCluster() throws Exception {
  util = new HBaseTestingUtility();
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
      HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.startMiniCluster();
  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
}
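A matching @AfterClass teardown is not shown in the snippet above. A minimal sketch, assuming the same static util field, would simply shut the secured mini cluster back down:

@AfterClass
public static void tearDownCluster() throws Exception {
  // Stop the mini cluster started in setupCluster
  util.shutdownMiniCluster();
}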
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
In the class TestHFileOutputFormat2, method manualTest:
public void manualTest(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  util = new HBaseTestingUtility(conf);
  if ("newtable".equals(args[0])) {
    TableName tname = TableName.valueOf(args[1]);
    byte[][] splitKeys = generateRandomSplitKeys(4);
    try (Table table = util.createTable(tname, FAMILIES, splitKeys)) {
    }
  } else if ("incremental".equals(args[0])) {
    TableName tname = TableName.valueOf(args[1]);
    try (Connection c = ConnectionFactory.createConnection(conf);
        Admin admin = c.getAdmin();
        RegionLocator regionLocator = c.getRegionLocator(tname)) {
      Path outDir = new Path("incremental-out");
      runIncrementalPELoad(conf, admin.getTableDescriptor(tname), regionLocator, outDir, false);
    }
  } else {
    throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental");
  }
}
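Since manualTest takes a String[] of arguments, it is presumably driven from the command line. A plausible entry point (a sketch, not necessarily the project's exact code):

public static void main(String[] args) throws Exception {
  // Run the manual test against whatever cluster the default HBase configuration points at
  new TestHFileOutputFormat2().manualTest(args);
}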
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
In the class TestDistributedLogSplitting, method startCluster:
private void startCluster(int num_rs) throws Exception {
  SplitLogCounters.resetCounters();
  LOG.info("Starting cluster");
  conf.getLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster
  conf.setInt("zookeeper.recovery.retry", 0);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
  // no load balancing
  conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0);
  conf.setInt("hbase.regionserver.wal.max.splitters", 3);
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.setDFSCluster(dfsCluster);
  TEST_UTIL.setZkCluster(zkCluster);
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs);
  cluster = TEST_UTIL.getHBaseCluster();
  LOG.info("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  master = cluster.getMaster();
  while (cluster.getLiveRegionServerThreads().size() < num_rs) {
    Threads.sleep(10);
  }
}
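Tests that call startCluster typically need a companion teardown. A minimal sketch, assuming the same TEST_UTIL field, that stops the mini HBase cluster after each test method (the DFS and ZooKeeper clusters are set from external fields above and would be managed separately):

@After
public void after() throws Exception {
  // Bring the mini HBase cluster back down; DFS and ZooKeeper are handled outside this method
  TEST_UTIL.shutdownMiniHBaseCluster();
}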
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
In the class TestCatalogJanitor, method testArchiveOldRegion:
@Test
public void testArchiveOldRegion() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  MasterServices services = new MockMasterServices(htu);
  // create the janitor
  CatalogJanitor janitor = new CatalogJanitor(services);
  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, we do not remove the parent.
  Result parentMetaRow = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // We have to set the root directory since it is used by HFileDisposer to figure out how to
  // get to the archive directory. Otherwise it just picks the first root directory it can find
  // (so the single test passes, but when the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());
  LOG.debug("Table dir:" + tabledir);
  LOG.debug("Store dir:" + storedir);
  LOG.debug("Store archive dir:" + storeArchive);
  // add a couple of store files that we can check for
  FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  int index = 0;
  for (FileStatus file : storeFiles) {
    LOG.debug("Have store file:" + file.getPath());
    assertEquals("Got unexpected store file", mockFiles[index].getPath(),
        storeFiles[index].getPath());
    index++;
  }
  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, parentMetaRow));
  LOG.debug("Finished cleanup of parent region");
  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  logFiles("original files", storeFiles);
  logFiles("archived files", archivedStoreFiles);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
  // cleanup
  FSUtils.delete(fs, rootdir, true);
  services.stop("Test finished");
  janitor.cancel(true);
}
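The logFiles helper used above is not shown. A minimal sketch of what it could look like (an assumption, not the project's exact code) that just logs each path in a listing:

private void logFiles(String description, FileStatus[] files) {
  LOG.debug("Current " + description + ":");
  for (FileStatus file : files) {
    LOG.debug(String.valueOf(file.getPath()));
  }
}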