
Example 56 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestRegionObserverScannerOpenHook, method testRegionObserverScanTimeStacking.

@Test
public void testRegionObserverScanTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };
    // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
    Configuration conf = new HBaseTestingUtil().getConfiguration();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    region.put(put);
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " + r, r.listCells());
    HBaseTestingUtil.closeRegionAndWAL(region);
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Configuration(org.apache.hadoop.conf.Configuration) RegionCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost) Get(org.apache.hadoop.hbase.client.Get) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
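
The two coprocessors loaded above, NoDataFromScan and EmptyRegionObsever, are inner classes of the test itself. As a minimal sketch of what a class loaded through RegionCoprocessorHost#load needs to provide, a no-op region observer only has to implement RegionCoprocessor and expose itself as a RegionObserver; the class name below is hypothetical and all hooks keep their default behavior.

import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

// Hypothetical no-op observer, loadable the same way as EmptyRegionObsever above.
public class NoopRegionObserver implements RegionCoprocessor, RegionObserver {
    @Override
    public Optional<RegionObserver> getRegionObserver() {
        // Expose this instance so the coprocessor host dispatches to its (default) hooks.
        return Optional.of(this);
    }
}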

Example 57 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestServerRemoteProcedure, method setUp.

@Before
public void setUp() throws Exception {
    util = new HBaseTestingUtil();
    this.executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", e)).build());
    master = new MockMasterServices(util.getConfiguration(), this.regionsToRegionServers);
    rsDispatcher = new MockRSProcedureDispatcher(master);
    rsDispatcher.setMockRsExecutor(new NoopRSExecutor());
    master.start(2, rsDispatcher);
    am = master.getAssignmentManager();
    master.getServerManager().getOnlineServersList().stream().forEach(serverName -> am.getRegionStates().getOrCreateServer(serverName));
}
Also used : AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) SortedSet(java.util.SortedSet) MockMasterServices(org.apache.hadoop.hbase.master.assignment.MockMasterServices) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) SWITCH_RPC_THROTTLE(org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType.SWITCH_RPC_THROTTLE) Future(java.util.concurrent.Future) TestName(org.junit.rules.TestName) ProcedureStateSerializer(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer) After(org.junit.After) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) MasterServices(org.apache.hadoop.hbase.master.MasterServices) Assert.fail(org.junit.Assert.fail) OpenRegionProcedure(org.apache.hadoop.hbase.master.assignment.OpenRegionProcedure) ClassRule(org.junit.ClassRule) ExpectedException(org.junit.rules.ExpectedException) ExecutorService(java.util.concurrent.ExecutorService) ServerName(org.apache.hadoop.hbase.ServerName) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) Logger(org.slf4j.Logger) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) RemoteProcedureDispatcher(org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher) Set(java.util.Set) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) IOException(java.io.IOException) Test(org.junit.Test) NavigableMap(java.util.NavigableMap) Category(org.junit.experimental.categories.Category) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) Executors(java.util.concurrent.Executors) TimeUnit(java.util.concurrent.TimeUnit) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) Rule(org.junit.Rule) AdminProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos) TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) RemoteProcedureException(org.apache.hadoop.hbase.procedure2.RemoteProcedureException) MasterTests(org.apache.hadoop.hbase.testclassification.MasterTests) Optional(java.util.Optional) Assert(org.junit.Assert) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) MockMasterServices(org.apache.hadoop.hbase.master.assignment.MockMasterServices) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Before(org.junit.Before)
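
The setup above starts MockMasterServices and a single-threaded scheduled executor but shows no cleanup. A rough sketch of a matching @After method is below, assuming the mock master exposes the usual stop(String) hook inherited from MasterServices and that shutting the executor down is sufficient for this test.

@After
public void tearDown() throws Exception {
    // Assumed cleanup: stop the mock master started in setUp ...
    master.stop("tearDown");
    // ... and stop the scheduled executor so no background task outlives the test.
    executor.shutdownNow();
}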

Example 58 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestHRegion, method testgetHDFSBlocksDistribution.

@Test
public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtil htu = new HBaseTestingUtil();
    // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
    // break up the file into more pieces that can be distributed across the three nodes and we
    // won't be able to have the condition this test asserts; that at least one node has
    // a copy of all replicas -- if small block size, then blocks are spread evenly across
    // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
    // final int DEFAULT_BLOCK_SIZE = 1024;
    // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);
    // set up a cluster with 3 nodes
    SingleProcessHBaseCluster cluster = null;
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;
    try {
        StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(regionServersCount).dataNodeHosts(dataNodeHosts).build();
        cluster = htu.startMiniCluster(option);
        byte[][] families = { fam1, fam2 };
        Table ht = htu.createTable(tableName, families);
        // Setting up region
        byte[] row = Bytes.toBytes("row1");
        byte[] col = Bytes.toBytes("col1");
        Put put = new Put(row);
        put.addColumn(fam1, col, 1, Bytes.toBytes("test1"));
        put.addColumn(fam2, col, 1, Bytes.toBytes("test2"));
        ht.put(put);
        HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
        firstRegion.flush(true);
        HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
        // Given the default replication factor is 2 and we have 2 HFiles,
        // we will have a total of 4 block replicas on 3 datanodes; thus there
        // must be at least one host that has replicas of both HFiles. That host's
        // weight will be equal to the unique blocks' total weight.
        long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
        StringBuilder sb = new StringBuilder();
        for (String host : blocksDistribution1.getTopHosts()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(host);
            sb.append("=");
            sb.append(blocksDistribution1.getWeight(host));
        }
        String topHost = blocksDistribution1.getTopHosts().get(0);
        long topHostWeight = blocksDistribution1.getWeight(topHost);
        String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
        LOG.info(msg);
        assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);
        // use the static method to compute the value, it should be the same.
        // static method is used by load balancer or other components
        HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo());
        long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();
        assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);
        ht.close();
    } finally {
        if (cluster != null) {
            htu.shutdownMiniCluster();
        }
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Table(org.apache.hadoop.hbase.client.Table) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) Put(org.apache.hadoop.hbase.client.Put) StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) Test(org.junit.Test)
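
StartTestingClusterOption replaces the long startMiniCluster(...) overloads with a builder. Beyond numRegionServers and dataNodeHosts used above, the builder carries further topology settings; the extra method names in this sketch are assumptions carried over from the older StartMiniClusterOption builder and may differ by HBase version.

// Hedged sketch of additional topology options (numMasters/numDataNodes are assumed names).
StartTestingClusterOption option = StartTestingClusterOption.builder()
    // a single active master is enough for this kind of test
    .numMasters(1)
    // three region servers, as in the test above
    .numRegionServers(3)
    // one data node per hostname supplied below
    .numDataNodes(3)
    .dataNodeHosts(new String[] { "host1", "host2", "host3" })
    .build();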

Example 59 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestHRegionFileSystem, method testBlockStoragePolicy.

@Test
public void testBlockStoragePolicy() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    Configuration conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniCluster();
    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
    HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
    // the original block storage policy would be HOT
    String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);
    // Recreate table and make sure storage policy could be set through configuration
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
    TEST_UTIL.startMiniCluster();
    table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertEquals("WARM", spA);
        assertEquals("WARM", spB);
        // alter table cf schema to change storage policies
        // and make sure it could override settings in conf
        ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]);
        // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor
        cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
        admin.modifyColumnFamily(TABLE_NAME, cfdA.build());
        while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().hasRegionsInTransition()) {
            Thread.sleep(200);
            LOG.debug("Waiting on table to finish schema altering");
        }
        // alter through HColumnDescriptor#setStoragePolicy
        ColumnFamilyDescriptorBuilder cfdB = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]);
        cfdB.setStoragePolicy("ALL_SSD");
        admin.modifyColumnFamily(TABLE_NAME, cfdB.build());
        while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().hasRegionsInTransition()) {
            Thread.sleep(200);
            LOG.debug("Waiting on table to finish schema altering");
        }
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertNotNull(spA);
        assertEquals("ONE_SSD", spA);
        assertNotNull(spB);
        assertEquals("ALL_SSD", spB);
        // flush memstore snapshot into 3 files
        for (long i = 0; i < 3; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
            table.put(put);
            admin.flush(TABLE_NAME);
        }
        // there should be 3 files in store dir
        FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
        Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
        FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath);
        assertNotNull(storeFiles);
        assertEquals(3, storeFiles.length);
        // store temp dir still exists but empty
        Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
        assertTrue(fs.exists(storeTempDir));
        FileStatus[] tempFiles = CommonFSUtils.listStatus(fs, storeTempDir);
        assertNull(tempFiles);
        // storage policy of cf temp dir and 3 store files should be ONE_SSD
        assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
        for (FileStatus status : storeFiles) {
            assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
        }
        // change storage policies by calling raw api directly
        regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
        regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertNotNull(spA);
        assertEquals("ALL_SSD", spA);
        assertNotNull(spB);
        assertEquals("ONE_SSD", spB);
    } finally {
        table.close();
        TEST_UTIL.deleteTable(TABLE_NAME);
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) FileSystem(org.apache.hadoop.fs.FileSystem) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem) Test(org.junit.Test)
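
The two polling loops above wait for the schema alterations to finish by sleeping while regions are in transition. Assuming the wait helper is available on this HBaseTestingUtil version, the same intent can be expressed more directly:

// Hedged alternative to the Thread.sleep loop: block until no regions are in
// transition after admin.modifyColumnFamily(...), or give up after 60 seconds.
TEST_UTIL.waitUntilNoRegionsInTransition(60000);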

Example 60 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestInputStreamBlockDistribution, method setUp.

@Before
public void setUp() throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    conf = testUtil.getConfiguration();
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    fs = cluster.getFileSystem();
    testPath = new Path(testUtil.getDefaultRootDirPath(), "test.file");
    writeSomeData(fs, testPath, 256 << 20, (byte) 2);
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Before(org.junit.Before)
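
Because setUp starts a mini DFS cluster, a matching teardown would shut it down again. The sketch below assumes the utility is kept as an instance field (in the snippet above it is only a local variable) and that shutdownMiniDFSCluster is available on this version.

@After
public void tearDown() throws Exception {
    // Assumes `testUtil` was stored as a field rather than the local created in setUp.
    testUtil.shutdownMiniDFSCluster();
}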

Aggregations

HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil): 144
Configuration (org.apache.hadoop.conf.Configuration): 42
Test (org.junit.Test): 42
Before (org.junit.Before): 41
BeforeClass (org.junit.BeforeClass): 37
Path (org.apache.hadoop.fs.Path): 24
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 22
Admin (org.apache.hadoop.hbase.client.Admin): 22
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 15
StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption): 14
FileSystem (org.apache.hadoop.fs.FileSystem): 13
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 12
TableName (org.apache.hadoop.hbase.TableName): 10
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 10
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 9
ServerName (org.apache.hadoop.hbase.ServerName): 8
Table (org.apache.hadoop.hbase.client.Table): 8
ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher): 8
IOException (java.io.IOException): 7
ArrayList (java.util.ArrayList): 7