
Example 71 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestBucketCache, method testRetrieveFromFile.

@Test
public void testRetrieveFromFile() throws Exception {
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
    Path testDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getTestFileSystem().mkdirs(testDir);
    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence");
    long usedSize = bucketCache.getAllocator().getUsedSize();
    assertEquals(0, usedSize);
    HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1);
    // Add blocks
    for (HFileBlockPair block : blocks) {
        bucketCache.cacheBlock(block.getBlockName(), block.getBlock());
    }
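    // cache each block again and wait until the write threads have flushed it to a bucket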
    for (HFileBlockPair block : blocks) {
        cacheAndWaitUntilFlushedToBucket(bucketCache, block.getBlockName(), block.getBlock());
    }
    usedSize = bucketCache.getAllocator().getUsedSize();
    assertNotEquals(0, usedSize);
    // persist cache to file
    bucketCache.shutdown();
    // restore cache from file
    bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence");
    assertEquals(usedSize, bucketCache.getAllocator().getUsedSize());
    // persist cache to file
    bucketCache.shutdown();
    // Reconfigure the bucket sizes so that the biggest bucket is smaller than
    // constructedBlockSize (8k or 16k); the cache can then no longer be restored from the file
    int[] smallBucketSizes = new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024 };
    bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, smallBucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence");
    assertEquals(0, bucketCache.getAllocator().getUsedSize());
    assertEquals(0, bucketCache.backingMap.size());
    TEST_UTIL.cleanupTestDir();
}
Also used: Path(org.apache.hadoop.fs.Path), HFileBlockPair(org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair), HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), Test(org.junit.Test)
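
The helper cacheAndWaitUntilFlushedToBucket used above is not shown in this snippet. Here is a minimal sketch of what such a helper could look like, assuming the test simply polls the cache's backingMap (which the snippet itself reads at the end) until the writer threads have flushed the entry; the 100 ms interval and everything not named in the snippet are assumptions:

private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey,
        Cacheable block) throws InterruptedException {
    // cache the block, then poll until it shows up in the bucket's backing map,
    // i.e. until the asynchronous writer threads have drained it out of the RAM cache
    cache.cacheBlock(cacheKey, block);
    while (!cache.backingMap.containsKey(cacheKey)) {
        Thread.sleep(100);
    }
}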

Example 72 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestSecureLoadIncrementalHFilesSplitRecovery, method setupCluster.

//This "overrides" the parent static method
//make sure they are in sync
@BeforeClass
public static void setupCluster() throws Exception {
    util = new HBaseTestingUtility();
    // set the always on security provider
    UserProvider.setUserProviderForTesting(util.getConfiguration(), HadoopSecurityEnabledUserProviderForTesting.class);
    // setup configuration
    SecureTestUtil.enableSecurity(util.getConfiguration());
    util.startMiniCluster();
    // Wait for the ACL table to become available
    util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
}
Also used: HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), BeforeClass(org.junit.BeforeClass)
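
For context on the "overrides" comment: JUnit 4 cannot truly override a static @BeforeClass method. A subclass that declares a static method with the same signature shadows the parent's, and JUnit then runs only the subclass version. A minimal illustration with hypothetical class names:

public class BaseLoadTest {
    @BeforeClass
    public static void setupCluster() throws Exception {
        // plain, non-secure cluster setup
    }
}

public class SecureLoadTest extends BaseLoadTest {
    @BeforeClass
    public static void setupCluster() throws Exception {
        // shadows BaseLoadTest.setupCluster(), so JUnit runs only this one;
        // that is why the two methods must be kept in sync by hand
    }
}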

Example 73 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestHFileOutputFormat2, method manualTest.

public void manualTest(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    util = new HBaseTestingUtility(conf);
    // guard against missing arguments before indexing into args
    if (args.length < 2) {
        throw new RuntimeException("usage: TestHFileOutputFormat2 newtable|incremental <tablename>");
    }
    if ("newtable".equals(args[0])) {
        TableName tname = TableName.valueOf(args[1]);
        byte[][] splitKeys = generateRandomSplitKeys(4);
        try (Table table = util.createTable(tname, FAMILIES, splitKeys)) {
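        // intentionally empty: the table is created and then immediately closed
        // by try-with-resources; only the side effect of creation matters here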
        }
    } else if ("incremental".equals(args[0])) {
        TableName tname = TableName.valueOf(args[1]);
        try (Connection c = ConnectionFactory.createConnection(conf);
            Admin admin = c.getAdmin();
            RegionLocator regionLocator = c.getRegionLocator(tname)) {
            Path outDir = new Path("incremental-out");
            runIncrementalPELoad(conf, admin.getTableDescriptor(tname), regionLocator, outDir, false);
        }
    } else {
        throw new RuntimeException("usage: TestHFileOutputFormat2 newtable|incremental <tablename>");
    }
}
Also used: Path(org.apache.hadoop.fs.Path), TableName(org.apache.hadoop.hbase.TableName), RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), Table(org.apache.hadoop.hbase.client.Table), Configuration(org.apache.hadoop.conf.Configuration), HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration), HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), Connection(org.apache.hadoop.hbase.client.Connection), Admin(org.apache.hadoop.hbase.client.Admin)
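
This method reads like a command-line entry point rather than a JUnit test. A hypothetical wrapper for invoking it; whether TestHFileOutputFormat2 actually declares a main() like this is an assumption based on the usage string:

public static void main(String[] args) throws Exception {
    // delegate straight to manualTest, which parses args itself
    new TestHFileOutputFormat2().manualTest(args);
}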

Example 74 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestDistributedLogSplitting, method startCluster.

private void startCluster(int num_rs) throws Exception {
    SplitLogCounters.resetCounters();
    LOG.info("Starting cluster");
    conf.getLong("hbase.splitlog.max.resubmit", 0);
    // Make the failure test faster
    conf.setInt("zookeeper.recovery.retry", 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
    // no load balancing
    conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0);
    conf.setInt("hbase.regionserver.wal.max.splitters", 3);
    conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
    TEST_UTIL.shutdownMiniHBaseCluster();
    TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.setDFSCluster(dfsCluster);
    TEST_UTIL.setZkCluster(zkCluster);
    TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    master = cluster.getMaster();
    while (cluster.getLiveRegionServerThreads().size() < num_rs) {
        Threads.sleep(10);
    }
}
Also used: HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility)
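
The closing while-loop polls with no upper bound, so a region server that never starts hangs the setup forever. HBaseTestingUtility inherits a bounded waitFor(timeout, predicate) from the Waiter test utilities, which could express the same wait with a deadline. A sketch, where the 60-second timeout is an arbitrary choice and num_rs must be (effectively) final:

// bounded alternative to the open-ended polling loop above
TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
        return cluster.getLiveRegionServerThreads().size() >= num_rs;
    }
});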

Example 75 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestCatalogJanitor, method testArchiveOldRegion.

@Test
public void testArchiveOldRegion() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    MasterServices services = new MockMasterServices(htu);
    // create the janitor
    CatalogJanitor janitor = new CatalogJanitor(services);
    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, that we do not
    // remove the parent.
    Result parentMetaRow = createResult(parent, splita, splitb);
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    Path rootdir = services.getMasterFileSystem().getRootDir();
    // Have to set the root directory since we use it in HFileDisposer to figure out how to
    // get to the archive directory. Otherwise it just picks the first root directory it can
    // find (so the single test passes, but when the full suite is run, things break).
    FSUtils.setRootDir(fs.getConf(), rootdir);
    Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
    Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent, tabledir, htd.getColumnFamilies()[0].getName());
    LOG.debug("Table dir:" + tabledir);
    LOG.debug("Store dir:" + storedir);
    LOG.debug("Store archive dir:" + storeArchive);
    // add a couple of store files that we can check for
    FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
    // get the current store files for comparison
    FileStatus[] storeFiles = fs.listStatus(storedir);
    int index = 0;
    for (FileStatus file : storeFiles) {
        LOG.debug("Have store file:" + file.getPath());
        assertEquals("Got unexpected store file", mockFiles[index].getPath(), storeFiles[index].getPath());
        index++;
    }
    // do the cleaning of the parent
    assertTrue(janitor.cleanParent(parent, parentMetaRow));
    LOG.debug("Finished cleanup of parent region");
    // and now check to make sure that the files have actually been archived
    FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
    logFiles("archived files", storeFiles);
    logFiles("archived files", archivedStoreFiles);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
    // cleanup
    FSUtils.delete(fs, rootdir, true);
    services.stop("Test finished");
    janitor.cancel(true);
}
Also used: Path(org.apache.hadoop.fs.Path), FileStatus(org.apache.hadoop.fs.FileStatus), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), Result(org.apache.hadoop.hbase.client.Result), RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult), HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem(org.apache.hadoop.fs.FileSystem), Test(org.junit.Test)
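
The addMockStoreFiles helper is referenced but not shown. A minimal sketch of what it could look like, assuming it only needs to drop placeholder files into the store directory for the janitor to archive; the file names and contents here are invented:

private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
        throws IOException {
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    fs.mkdirs(storedir);
    for (int i = 0; i < count; i++) {
        // the content is irrelevant; the janitor only moves the files
        try (FSDataOutputStream out = fs.create(new Path(storedir, "mockStoreFile" + i))) {
            out.writeBytes("some mock store file data");
        }
    }
    return fs.listStatus(storedir);
}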

Aggregations

Types most often used together with HBaseTestingUtility across the indexed examples, with usage counts:

HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 136
Configuration (org.apache.hadoop.conf.Configuration): 50
BeforeClass (org.junit.BeforeClass): 49
Test (org.junit.Test): 42
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 35
Path (org.apache.hadoop.fs.Path): 29
Admin (org.apache.hadoop.hbase.client.Admin): 24
FileSystem (org.apache.hadoop.fs.FileSystem): 22
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 20
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 18
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16
Before (org.junit.Before): 14
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 11
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 11
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 10
Table (org.apache.hadoop.hbase.client.Table): 8
HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem): 8
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 7
Result (org.apache.hadoop.hbase.client.Result): 7