Example 91 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestCoprocessorConfiguration, method testRegionCoprocessorHostTableLoadingDisabled.

@Test
public void testRegionCoprocessorHostTableLoadingDisabled() throws Exception {
    Configuration conf = new Configuration(CONF);
    // set the flags explicitly in case the defaults change: system coprocessors on, user (table) coprocessors off
    conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true);
    conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, false);
    HRegion region = mock(HRegion.class);
    when(region.getRegionInfo()).thenReturn(REGIONINFO);
    when(region.getTableDesc()).thenReturn(TABLEDESC);
    RegionServerServices rsServices = mock(RegionServerServices.class);
    systemCoprocessorLoaded.set(false);
    tableCoprocessorLoaded.set(false);
    new RegionCoprocessorHost(region, rsServices, conf);
    assertTrue("System coprocessors should have been loaded", systemCoprocessorLoaded.get());
    assertFalse("Table coprocessors should not have been loaded", tableCoprocessorLoaded.get());
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) RegionCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost) Test(org.junit.Test)
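
For contrast, a minimal sketch of the complementary case. This is not part of the original test class; it assumes the same CONF, REGIONINFO, and TABLEDESC fixtures and the same AtomicBoolean trackers as the test above. With both flags enabled, the table coprocessors should load as well.

@Test
public void testRegionCoprocessorHostTableLoadingEnabled() throws Exception {
    // hypothetical companion test; fixture names are assumed from the example above
    Configuration conf = new Configuration(CONF);
    conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true);
    conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, true);
    HRegion region = mock(HRegion.class);
    when(region.getRegionInfo()).thenReturn(REGIONINFO);
    when(region.getTableDesc()).thenReturn(TABLEDESC);
    RegionServerServices rsServices = mock(RegionServerServices.class);
    systemCoprocessorLoaded.set(false);
    tableCoprocessorLoaded.set(false);
    new RegionCoprocessorHost(region, rsServices, conf);
    assertTrue("System coprocessors should have been loaded", systemCoprocessorLoaded.get());
    assertTrue("Table coprocessors should have been loaded", tableCoprocessorLoaded.get());
}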

Example 92 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestClientPushback, method testClientTracksServerPushback.

@Test(timeout = 60000)
public void testClientTracksServerPushback() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
    Table table = conn.getTable(tableName);
    HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
    Region region = rs.getOnlineRegions(tableName).get(0);
    LOG.debug("Writing some data to " + tableName);
    // write some data
    Put p = new Put(Bytes.toBytes("row"));
    p.addColumn(family, qualifier, Bytes.toBytes("value1"));
    table.put(p);
    // get the current load on the RS; this assumes the memstore has not been flushed since we wrote the data
    int load = (int) ((((HRegion) region).addAndGetMemstoreSize(new MemstoreSize(0, 0)) * 100) / flushSizeBytes);
    LOG.debug("Done writing some data to " + tableName);
    // get the stats for the region hosting our table
    ClientBackoffPolicy backoffPolicy = conn.getBackoffPolicy();
    assertTrue("Backoff policy is not correctly configured", backoffPolicy instanceof ExponentialClientBackoffPolicy);
    ServerStatisticTracker stats = conn.getStatisticsTracker();
    assertNotNull("No stats configured for the client!", stats);
    // get the names so we can query the stats
    ServerName server = rs.getServerName();
    byte[] regionName = region.getRegionInfo().getRegionName();
    // check to see we found some load on the memstore
    ServerStatistics serverStats = stats.getServerStatsForTesting(server);
    ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
    assertEquals("We did not find some load on the memstore", load, regionStats.getMemstoreLoadPercent());
    // check that the load reported produces a nonzero delay
    long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats);
    assertNotEquals("Reported load does not produce a backoff", backoffTime, 0);
    LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + server + " is " + backoffTime);
    // Reach into the connection and submit work directly to AsyncProcess so we can
    // monitor how long the submission was delayed via a callback
    List<Row> ops = new ArrayList<>(1);
    ops.add(p);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicLong endTime = new AtomicLong();
    long startTime = EnvironmentEdgeManager.currentTime();
    BufferedMutatorImpl mutator = ((HTable) table).mutator;
    Batch.Callback<Result> callback = (byte[] r, byte[] row, Result result) -> {
        endTime.set(EnvironmentEdgeManager.currentTime());
        latch.countDown();
    };
    AsyncProcessTask<Result> task = AsyncProcessTask.newBuilder(callback).setPool(mutator.getPool()).setTableName(tableName).setRowAccess(ops).setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE).setOperationTimeout(conn.getConnectionConfiguration().getOperationTimeout()).setRpcTimeout(60 * 1000).build();
    mutator.getAsyncProcess().submit(task);
    // Currently the ExponentialClientBackoffPolicy under these test conditions
    // produces a backoffTime of 151 milliseconds. This is long enough so the
    // wait and related checks below are reasonable. Revisit if the backoff
    // time reported by above debug logging has significantly deviated.
    String name = server.getServerName() + "," + Bytes.toStringBinary(regionName);
    MetricsConnection.RegionStats rsStats = conn.getConnectionMetrics().serverStats.get(server).get(regionName);
    assertEquals(name, rsStats.name);
    assertEquals(rsStats.heapOccupancyHist.getSnapshot().getMean(), (double) regionStats.getHeapOccupancyPercent(), 0.1);
    assertEquals(rsStats.memstoreLoadHist.getSnapshot().getMean(), (double) regionStats.getMemstoreLoadPercent(), 0.1);
    MetricsConnection.RunnerStats runnerStats = conn.getConnectionMetrics().runnerStats;
    assertEquals(runnerStats.delayRunners.getCount(), 1);
    assertEquals(runnerStats.normalRunners.getCount(), 1);
    assertEquals("", runnerStats.delayIntevalHist.getSnapshot().getMean(), (double) backoffTime, 0.1);
    latch.await(backoffTime * 2, TimeUnit.MILLISECONDS);
    assertNotEquals("AsyncProcess did not submit the work time", endTime.get(), 0);
    assertTrue("AsyncProcess did not delay long enough", endTime.get() - startTime >= backoffTime);
}
Also used : ClientBackoffPolicy(org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy) ExponentialClientBackoffPolicy(org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy) Configuration(org.apache.hadoop.conf.Configuration) ServerStatistics(org.apache.hadoop.hbase.client.backoff.ServerStatistics) ArrayList(java.util.ArrayList) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) MemstoreSize(org.apache.hadoop.hbase.regionserver.MemstoreSize) CountDownLatch(java.util.concurrent.CountDownLatch) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) AtomicLong(java.util.concurrent.atomic.AtomicLong) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) Test(org.junit.Test)
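
The assertions on backoffTime depend on ExponentialClientBackoffPolicy turning the reported memstore load into a delay. A self-contained sketch of the general idea follows; the cap and the 4.0 steepness factor are illustrative assumptions, not HBase's actual constants.

import java.util.concurrent.TimeUnit;

public final class ExponentialBackoffSketch {
    // assumed cap for illustration; HBase's real maximum comes from configuration
    static final long MAX_BACKOFF_MS = TimeUnit.MINUTES.toMillis(5);

    static long backoffFor(int memstoreLoadPercent) {
        double percent = Math.min(memstoreLoadPercent, 100) / 100.0;
        // normalize an exponential curve to [0, 1] so that 0% load yields no
        // delay and 100% load yields the full cap
        double multiplier = (Math.exp(percent * 4.0) - 1.0) / (Math.exp(4.0) - 1.0);
        return (long) (multiplier * MAX_BACKOFF_MS);
    }

    public static void main(String[] args) {
        // a modest load yields a short but nonzero delay, which is what the
        // assertNotEquals check in the test relies on
        System.out.println(backoffFor(11) + " ms");
    }
}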

Example 93 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestBlockReorder, method testHBaseCluster.

/**
   * Test that the hook works within HBase, including when there are multiple blocks.
   */
@Test()
public void testHBaseCluster() throws Exception {
    byte[] sb = "sb".getBytes();
    htu.startMiniZKCluster();
    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    hbm.waitForActiveAndReadyMaster();
    HRegionServer targetRs = hbm.getMaster();
    // We want to have a datanode with the same name as the region server, so
    // we're going to get the region server name and start a new datanode with that name.
    String host4 = targetRs.getServerName().getHostname();
    LOG.info("Starting a new datanode with the name=" + host4);
    cluster.startDataNodes(conf, 1, true, null, new String[] { "/r4" }, new String[] { host4 }, null);
    cluster.waitClusterUp();
    final int repCount = 3;
    // We use the regionserver file system & conf as we expect it to have the hook.
    conf = targetRs.getConfiguration();
    HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
    Table h = htu.createTable(TableName.valueOf(name.getMethodName()), sb);
    // Now, we have 4 datanodes and a replication count of 3, so we don't know whether the
    // datanode with the same name will be used. We can't really stop an existing datanode;
    // that would expose us to nasty HDFS bugs/issues. So we're going to try multiple times.
    // Now we need to find the log file, its locations, and look at it
    String rootDir = new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME + "/" + targetRs.getServerName().toString()).toUri().getPath();
    DistributedFileSystem mdfs = (DistributedFileSystem) hbm.getMaster().getMasterFileSystem().getFileSystem();
    int nbTest = 0;
    while (nbTest < 10) {
        final List<Region> regions = targetRs.getOnlineRegions(h.getName());
        final CountDownLatch latch = new CountDownLatch(regions.size());
        // listen for successful log rolls
        final WALActionsListener listener = new WALActionsListener.Base() {

            @Override
            public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
                latch.countDown();
            }
        };
        for (Region region : regions) {
            ((HRegion) region).getWAL().registerWALActionsListener(listener);
        }
        htu.getAdmin().rollWALWriter(targetRs.getServerName());
        // wait
        try {
            latch.await();
        } catch (InterruptedException exception) {
            LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. If later " + "tests fail, it's probably because we should still be waiting.");
            Thread.currentThread().interrupt();
        }
        for (Region region : regions) {
            ((HRegion) region).getWAL().unregisterWALActionsListener(listener);
        }
        // We need a sleep as the namenode is informed asynchronously
        Thread.sleep(100);
        // insert one put to ensure a minimal size
        Put p = new Put(sb);
        p.addColumn(sb, sb, sb);
        h.put(p);
        DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
        HdfsFileStatus[] hfs = dl.getPartialListing();
        // As we wrote a put, we should have at least one log file.
        Assert.assertTrue(hfs.length >= 1);
        for (HdfsFileStatus hf : hfs) {
            // Because this is a live cluster, log files might get archived while we're processing
            try {
                LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
                String logFile = rootDir + "/" + hf.getLocalName();
                FileStatus fsLog = rfs.getFileStatus(new Path(logFile));
                LOG.info("Checking log file: " + logFile);
                // Now checking that the hook is up and running
                // We can't call directly getBlockLocations, it's not available in HFileSystem
                // We're trying multiple times to be sure, as the order is random
                BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
                if (bls.length > 0) {
                    BlockLocation bl = bls[0];
                    LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile + " ");
                    for (int i = 0; i < bl.getHosts().length - 1; i++) {
                        LOG.info(bl.getHosts()[i] + "    " + logFile);
                        Assert.assertNotSame(bl.getHosts()[i], host4);
                    }
                    String last = bl.getHosts()[bl.getHosts().length - 1];
                    LOG.info(last + "    " + logFile);
                    if (host4.equals(last)) {
                        nbTest++;
                        LOG.info(logFile + " is on the new datanode and is ok");
                        if (bl.getHosts().length == 3) {
                            // We can test this case from the file system as well
                            // Checking the underlying file system. Multiple times as the order is random
                            testFromDFS(dfs, logFile, repCount, host4);
                            // now from the master
                            testFromDFS(mdfs, logFile, repCount, host4);
                        }
                    }
                }
            } catch (FileNotFoundException exception) {
                LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + "archived out from under us so we'll ignore and retry. If this test hangs " + "indefinitely you should treat this failure as a symptom.", exception);
            } catch (RemoteException exception) {
                if (exception.unwrapRemoteException() instanceof FileNotFoundException) {
                    LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + "archived out from under us so we'll ignore and retry. If this test hangs " + "indefinitely you should treat this failure as a symptom.", exception);
                } else {
                    throw exception;
                }
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FileNotFoundException(java.io.FileNotFoundException) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) WALActionsListener(org.apache.hadoop.hbase.regionserver.wal.WALActionsListener) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) CountDownLatch(java.util.concurrent.CountDownLatch) BlockLocation(org.apache.hadoop.fs.BlockLocation) Put(org.apache.hadoop.hbase.client.Put) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Region(org.apache.hadoop.hbase.regionserver.Region) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
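
The two testFromDFS calls verify replica placement, but that helper's body is not shown in this excerpt. A minimal sketch of that kind of check, assuming an already-initialized FileSystem and using the same Hadoop BlockLocation API as the test above:

// hypothetical helper in the spirit of testFromDFS; name and signature assumed
static void assertHostHoldsReplica(FileSystem fs, Path file, String expectedHost)
        throws IOException {
    FileStatus status = fs.getFileStatus(file);
    // ask only about the first block of the file, as the test above does
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, 1);
    boolean found = false;
    for (BlockLocation location : locations) {
        for (String host : location.getHosts()) {
            found |= expectedHost.equals(host);
        }
    }
    Assert.assertTrue("No replica of block 0 of " + file + " on " + expectedHost, found);
}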

Example 94 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestMultipleColumnPrefixFilter, method testMultipleColumnPrefixFilter.

@Test
public void testMultipleColumnPrefixFilter() throws IOException {
    String family = "Family";
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMaxVersions(3);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    List<String> rows = generateRandomWords(100, "row");
    List<String> columns = generateRandomWords(10000, "column");
    long maxTimestamp = 2;
    List<Cell> kvList = new ArrayList<>();
    Map<String, List<Cell>> prefixMap = new HashMap<>();
    prefixMap.put("p", new ArrayList<>());
    prefixMap.put("q", new ArrayList<>());
    prefixMap.put("s", new ArrayList<>());
    String valueString = "ValueString";
    for (String row : rows) {
        Put p = new Put(Bytes.toBytes(row));
        p.setDurability(Durability.SKIP_WAL);
        for (String column : columns) {
            for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
                KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString);
                p.add(kv);
                kvList.add(kv);
                for (String s : prefixMap.keySet()) {
                    if (column.startsWith(s)) {
                        prefixMap.get(s).add(kv);
                    }
                }
            }
        }
        region.put(p);
    }
    MultipleColumnPrefixFilter filter;
    Scan scan = new Scan();
    scan.setMaxVersions();
    byte[][] filter_prefix = new byte[2][];
    filter_prefix[0] = new byte[] { 'p' };
    filter_prefix[1] = new byte[] { 'q' };
    filter = new MultipleColumnPrefixFilter(filter_prefix);
    scan.setFilter(filter);
    List<Cell> results = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);
    while (scanner.next(results)) ;
    assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) InternalScanner(org.apache.hadoop.hbase.regionserver.InternalScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) List(java.util.List) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
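
Outside an embedded-region test, the same filter is typically applied to a client-side Scan. A minimal usage sketch, assuming an open Connection conn and an existing table "t" whose column qualifiers include the prefixes "p" and "q":

// client-side sketch; the connection and table name are assumptions
try (Table table = conn.getTable(TableName.valueOf("t"))) {
    byte[][] prefixes = new byte[][] { Bytes.toBytes("p"), Bytes.toBytes("q") };
    Scan scan = new Scan();
    scan.setFilter(new MultipleColumnPrefixFilter(prefixes));
    try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
            // only cells whose qualifier starts with "p" or "q" are returned
            System.out.println(result);
        }
    }
}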

Example 95 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestMultipleColumnPrefixFilter, method testMultipleColumnPrefixFilterWithColumnPrefixFilter.

@Test
public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOException {
    String family = "Family";
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor(family));
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    List<String> rows = generateRandomWords(100, "row");
    List<String> columns = generateRandomWords(10000, "column");
    long maxTimestamp = 2;
    String valueString = "ValueString";
    for (String row : rows) {
        Put p = new Put(Bytes.toBytes(row));
        p.setDurability(Durability.SKIP_WAL);
        for (String column : columns) {
            for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
                KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString);
                p.add(kv);
            }
        }
        region.put(p);
    }
    MultipleColumnPrefixFilter multiplePrefixFilter;
    Scan scan1 = new Scan();
    scan1.setMaxVersions();
    byte[][] filter_prefix = new byte[1][];
    filter_prefix[0] = new byte[] { 'p' };
    multiplePrefixFilter = new MultipleColumnPrefixFilter(filter_prefix);
    scan1.setFilter(multiplePrefixFilter);
    List<Cell> results1 = new ArrayList<>();
    InternalScanner scanner1 = region.getScanner(scan1);
    while (scanner1.next(results1)) ;
    ColumnPrefixFilter singlePrefixFilter;
    Scan scan2 = new Scan();
    scan2.setMaxVersions();
    singlePrefixFilter = new ColumnPrefixFilter(Bytes.toBytes("p"));
    scan2.setFilter(singlePrefixFilter);
    List<Cell> results2 = new ArrayList<>();
    // scan with scan2 so the single ColumnPrefixFilter is actually exercised
    InternalScanner scanner2 = region.getScanner(scan2);
    while (scanner2.next(results2)) ;
    assertEquals(results1.size(), results2.size());
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) InternalScanner(org.apache.hadoop.hbase.regionserver.InternalScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
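
The final assertion holds because a MultipleColumnPrefixFilter constructed with a single prefix is semantically equivalent to a ColumnPrefixFilter for that prefix, so both scans return the same cells.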

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 148
Test (org.junit.Test) 88
Put (org.apache.hadoop.hbase.client.Put) 56
Path (org.apache.hadoop.fs.Path) 40
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 40
Scan (org.apache.hadoop.hbase.client.Scan) 37
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 36
Cell (org.apache.hadoop.hbase.Cell) 35
TableId (co.cask.cdap.data2.util.TableId) 32
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 28
IOException (java.io.IOException) 26
WAL (org.apache.hadoop.hbase.wal.WAL) 25
FileSystem (org.apache.hadoop.fs.FileSystem) 24
ArrayList (java.util.ArrayList) 22
TableName (org.apache.hadoop.hbase.TableName) 22
Configuration (org.apache.hadoop.conf.Configuration) 21
Result (org.apache.hadoop.hbase.client.Result) 21
Region (org.apache.hadoop.hbase.regionserver.Region) 21
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster) 19
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner) 19