Example 81 with HBaseTestingUtility

use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

the class TestMasterMetrics method startCluster.

@BeforeClass
public static void startCluster() throws Exception {
    LOG.info("Starting cluster");
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniCluster(1, 1, 1, null, MyMaster.class, null);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    master = cluster.getMaster();
}
Also used : HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) BeforeClass(org.junit.BeforeClass)
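
The @BeforeClass above leaves the mini cluster running for the whole test class, so an @AfterClass counterpart is what finally releases it. A minimal sketch of such a teardown, assuming the same static TEST_UTIL field (the method name shutdownCluster is illustrative, and org.junit.AfterClass is the extra import):

@AfterClass
public static void shutdownCluster() throws Exception {
    LOG.info("Shutting down cluster");
    // Stops the masters and region servers started by startMiniCluster()
    // and cleans up the backing mini DFS/ZK clusters and test directories.
    TEST_UTIL.shutdownMiniCluster();
}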

Example 82 with HBaseTestingUtility

use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

the class TestMasterRestartAfterDisablingTable method testForCheckingIfEnableAndDisableWorksFineAfterSwitch.

@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() throws Exception {
    final int NUM_MASTERS = 2;
    final int NUM_RS = 1;
    final int NUM_REGIONS_TO_CREATE = 4;
    // Start the cluster
    log("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    log("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    // Create a table with regions
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] family = Bytes.toBytes("family");
    log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
    Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
    int numRegions = -1;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        numRegions = r.getStartKeys().length;
    }
    // catalogs
    numRegions += 1;
    log("Waiting for no more RIT\n");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Disabling table\n");
    TEST_UTIL.getAdmin().disableTable(tableName);
    NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertEquals("The number of regions for the table tableRestart should be 0 and only" + "the catalog and namespace tables should be present.", 2, regions.size());
    List<MasterThread> masterThreads = cluster.getMasterThreads();
    MasterThread activeMaster = null;
    if (masterThreads.get(0).getMaster().isActiveMaster()) {
        activeMaster = masterThreads.get(0);
    } else {
        activeMaster = masterThreads.get(1);
    }
    activeMaster.getMaster().stop("stopping the active master so that the backup can become active");
    cluster.hbaseCluster.waitOnMaster(activeMaster);
    cluster.waitForActiveAndReadyMaster();
    assertTrue("The table should not be in enabled state", cluster.getMaster().getTableStateManager().isTableState(TableName.valueOf(name.getMethodName()), TableState.State.DISABLED, TableState.State.DISABLING));
    log("Enabling table\n");
    // Need a new Admin, the previous one is on the old master
    Admin admin = TEST_UTIL.getAdmin();
    admin.enableTable(tableName);
    admin.close();
    log("Waiting for no more RIT\n");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster\n");
    regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertEquals("The assigned regions were not onlined after master" + " switch except for the catalog and namespace tables.", 6, regions.size());
    assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager().isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
    ht.close();
    TEST_UTIL.shutdownMiniCluster();
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) Test(org.junit.Test)
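
The active-master lookup in this test hard-codes exactly two master threads. With a larger NUM_MASTERS, a small loop generalizes it; a minimal sketch, where getActiveMasterThread is a hypothetical helper name (it needs only java.util.List plus the MasterThread import already listed above):

private static MasterThread getActiveMasterThread(List<MasterThread> masterThreads) {
    // Return the first thread whose HMaster currently reports itself active.
    for (MasterThread mt : masterThreads) {
        if (mt.getMaster().isActiveMaster()) {
            return mt;
        }
    }
    // No active master yet (e.g. mid-failover); callers should wait and retry.
    return null;
}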

Example 83 with HBaseTestingUtility

use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

the class TestSplitLogManager method setup.

@Before
public void setup() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    conf = TEST_UTIL.getConfiguration();
    // Use a different ZK wrapper instance for each test.
    zkw = new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
    master = new DummyMasterServices(zkw, conf);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.baseZNode);
    ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.baseZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.znodePaths.baseZNode) != -1);
    LOG.debug(zkw.znodePaths.baseZNode + " created");
    ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.splitLogZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.znodePaths.splitLogZNode) != -1);
    LOG.debug(zkw.znodePaths.splitLogZNode + " created");
    resetCounters();
    // By default, we let the test manage the error as before, so the server
    // does not appear dead from the master's point of view, only from the split-log point of view.
    Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
    to = 12000;
    conf.setInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, to);
    conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
    to = to + 16 * 100;
    this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
}
Also used : HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ServerName(org.apache.hadoop.hbase.ServerName) Before(org.junit.Before)
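
A matching @After would release these resources in reverse order of setup(). A minimal sketch, assuming DummyMasterServices inherits stop(String) from the Server interface and that zkw and TEST_UTIL are the fields initialized above (the stop reason string and the org.junit.After import are the only additions):

@After
public void teardown() throws Exception {
    // Undo setup() back to front: the dummy master, then the per-test
    // ZooKeeper watcher, then the mini ZK cluster that backs it.
    master.stop("teardown");
    zkw.close();
    TEST_UTIL.shutdownMiniZKCluster();
}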

Example 84 with HBaseTestingUtility

use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

the class TestCleanerChore method testNoExceptionFromDirectoryWithRacyChildren.

/**
   * The cleaner runs in a loop, where it first checks to see whether all the files under a
   * directory can be deleted. If they all can, then we try to delete the directory. However, a
   * file may be added to that directory after the original check. This test ensures that we
   * don't accidentally delete that directory and don't get spurious IOExceptions.
   * <p>
   * This was from HBASE-7465.
   * @throws Exception on failure
   */
@Test
public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
    Stoppable stop = new StoppableImplementation();
    // need to use a local HBaseTestingUtility so we don't break the other tests, which run
    // against the local FS and get hosed once we start up a minicluster.
    HBaseTestingUtility localUtil = new HBaseTestingUtility();
    Configuration conf = localUtil.getConfiguration();
    final Path testDir = UTIL.getDataTestDir();
    final FileSystem fs = UTIL.getTestFileSystem();
    LOG.debug("Writing test data to: " + testDir);
    String confKey = "hbase.test.cleaner.delegates";
    conf.set(confKey, AlwaysDelete.class.getName());
    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
    // spy on the delegate to ensure that we don't check for directories
    AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
    AlwaysDelete spy = Mockito.spy(delegate);
    chore.cleanersChain.set(0, spy);
    // create the directory layout in the directory to clean
    final Path parent = new Path(testDir, "parent");
    Path file = new Path(parent, "someFile");
    fs.mkdirs(parent);
    // touch a new file
    fs.create(file).close();
    assertTrue("Test file didn't get created.", fs.exists(file));
    final Path racyFile = new Path(parent, "addedFile");
    // when we attempt to delete the original file, add another file in the same directory
    Mockito.doAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            fs.create(racyFile).close();
            FSUtils.logFileSystemState(fs, testDir, LOG);
            return (Boolean) invocation.callRealMethod();
        }
    }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
    // attempt to delete the directory; it should fail because a file is added mid-iteration
    if (chore.checkAndDeleteDirectory(parent)) {
        throw new Exception("Reported success deleting directory, should have failed when adding file mid-iteration");
    }
    // make sure all the directories + added file exist, but the original file is deleted
    assertTrue("Added file unexpectedly deleted", fs.exists(racyFile));
    assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
    assertFalse("Original file unexpectedly retained", fs.exists(file));
    Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) Stoppable(org.apache.hadoop.hbase.Stoppable) IOException(java.io.IOException) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
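
For contrast with the racy case, a directory whose contents stay stable during the check should be deleted outright. A minimal happy-path sketch reusing stop, conf, fs, testDir, and confKey from the test above, with a fresh chore so the racy Mockito answer is not in play (the chore, directory, and file names are illustrative):

AllValidPaths quietChore = new AllValidPaths("quiet-file-cleaner", stop, conf, fs, testDir, confKey);
Path quietParent = new Path(testDir, "quietParent");
fs.mkdirs(quietParent);
// one deletable file, and nothing added behind the cleaner's back
fs.create(new Path(quietParent, "onlyFile")).close();
assertTrue("Directory with only deletable files should be removed",
    quietChore.checkAndDeleteDirectory(quietParent));
assertFalse("Deleted directory should not linger", fs.exists(quietParent));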

Example 85 with HBaseTestingUtility

use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

the class TestJoinedScanners method testJoinedScanners.

@Test
public void testJoinedScanners() throws Exception {
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024 * 1024;
    htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 1);
    htu.getConfiguration().setLong("hbase.hregion.max.filesize", 322122547200L);
    MiniHBaseCluster cluster = null;
    try {
        cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
        byte[][] families = { cf_essential, cf_joined };
        final TableName tableName = TableName.valueOf(name.getMethodName());
        HTableDescriptor desc = new HTableDescriptor(tableName);
        for (byte[] family : families) {
            HColumnDescriptor hcd = new HColumnDescriptor(family);
            hcd.setDataBlockEncoding(blockEncoding);
            desc.addFamily(hcd);
        }
        htu.getAdmin().createTable(desc);
        Table ht = htu.getConnection().getTable(tableName);
        long rows_to_insert = 1000;
        int insert_batch = 20;
        long time = System.nanoTime();
        Random rand = new Random(time);
        LOG.info("Make " + Long.toString(rows_to_insert) + " rows, total size = " + Float.toString(rows_to_insert * valueWidth / 1024 / 1024) + " MB");
        byte[] val_large = new byte[valueWidth];
        List<Put> puts = new ArrayList<>();
        for (long i = 0; i < rows_to_insert; i++) {
            Put put = new Put(Bytes.toBytes(Long.toString(i)));
            if (rand.nextInt(100) <= selectionRatio) {
                put.addColumn(cf_essential, col_name, flag_yes);
            } else {
                put.addColumn(cf_essential, col_name, flag_no);
            }
            put.addColumn(cf_joined, col_name, val_large);
            puts.add(put);
            if (puts.size() >= insert_batch) {
                ht.put(puts);
                puts.clear();
            }
        }
    // flush any puts left over from the final partial batch
    if (puts.size() > 0) {
            ht.put(puts);
            puts.clear();
        }
        LOG.info("Data generated in " + Double.toString((System.nanoTime() - time) / 1000000000.0) + " seconds");
        boolean slow = true;
        for (int i = 0; i < 10; ++i) {
            runScanner(ht, slow);
            slow = !slow;
        }
        ht.close();
    } finally {
        if (cluster != null) {
            htu.shutdownMiniCluster();
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) Random(java.util.Random) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) Test(org.junit.Test)
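
The runScanner helper driven by the alternating loop above is not shown on this page. Its key knob is Scan.setLoadColumnFamiliesOnDemand, which enables the joined scanner so cf_joined is only read for rows whose essential-family flag passes the filter. A plausible sketch, assuming the fields used above plus org.apache.hadoop.hbase.client.Scan/ResultScanner/Result, org.apache.hadoop.hbase.filter.SingleColumnValueFilter, and org.apache.hadoop.hbase.filter.CompareFilter.CompareOp (the filter setup and timing log are assumptions, not copied from the original test):

private void runScanner(Table table, boolean slow) throws Exception {
    long time = System.nanoTime();
    Scan scan = new Scan();
    // Only rows flagged "yes" in the essential family should come back.
    scan.setFilter(new SingleColumnValueFilter(cf_essential, col_name, CompareOp.EQUAL, flag_yes));
    // On the fast passes (slow == false) on-demand loading is enabled, so the
    // joined scanner defers reading cf_joined until the essential filter passes.
    scan.setLoadColumnFamiliesOnDemand(!slow);
    long rowCount = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result = scanner.next(); result != null; result = scanner.next()) {
            rowCount++;
        }
    }
    LOG.info((slow ? "Regular" : "Joined") + " scan: " + rowCount + " rows in "
        + (System.nanoTime() - time) / 1000000000.0 + " seconds");
}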

Aggregations

HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 136
Configuration (org.apache.hadoop.conf.Configuration): 50
BeforeClass (org.junit.BeforeClass): 49
Test (org.junit.Test): 42
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 35
Path (org.apache.hadoop.fs.Path): 29
Admin (org.apache.hadoop.hbase.client.Admin): 24
FileSystem (org.apache.hadoop.fs.FileSystem): 22
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 20
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 18
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16
Before (org.junit.Before): 14
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 11
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 11
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 10
Table (org.apache.hadoop.hbase.client.Table): 8
HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem): 8
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 7
Result (org.apache.hadoop.hbase.client.Result): 7