
Example 1 with MemstoreSize

Use of org.apache.hadoop.hbase.regionserver.MemstoreSize in project hbase by apache.

From the class TestClientPushback, method testClientTracksServerPushback.
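
For orientation, MemstoreSize is the value object both examples on this page revolve around. A minimal sketch of constructing one, assuming (based only on the call shape in the test below) that the two arguments are a data size and a heap overhead in bytes; that reading of the arguments is an assumption, not something this page confirms:

import org.apache.hadoop.hbase.regionserver.MemstoreSize;

// A minimal sketch, not taken from the test below. Only the call shape
// new MemstoreSize(0, 0) is confirmed there; the (dataSize, heapOverhead)
// interpretation of the two arguments is an assumption.
MemstoreSize empty = new MemstoreSize(0, 0);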

@Test(timeout = 60000)
public void testClientTracksServerPushback() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
    Table table = conn.getTable(tableName);
    HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
    Region region = rs.getOnlineRegions(tableName).get(0);
    LOG.debug("Writing some data to " + tableName);
    // write some data
    Put p = new Put(Bytes.toBytes("row"));
    p.addColumn(family, qualifier, Bytes.toBytes("value1"));
    table.put(p);
    // get the current load on the RS. Hopefully the memstore hasn't flushed since we wrote the data
    int load = (int) ((((HRegion) region).addAndGetMemstoreSize(new MemstoreSize(0, 0)) * 100) / flushSizeBytes);
    LOG.debug("Done writing some data to " + tableName);
    // get the stats for the region hosting our table
    ClientBackoffPolicy backoffPolicy = conn.getBackoffPolicy();
    assertTrue("Backoff policy is not correctly configured", backoffPolicy instanceof ExponentialClientBackoffPolicy);
    ServerStatisticTracker stats = conn.getStatisticsTracker();
    assertNotNull("No stats configured for the client!", stats);
    // get the names so we can query the stats
    ServerName server = rs.getServerName();
    byte[] regionName = region.getRegionInfo().getRegionName();
    // check to see we found some load on the memstore
    ServerStatistics serverStats = stats.getServerStatsForTesting(server);
    ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
    assertEquals("We did not find some load on the memstore", load, regionStats.getMemstoreLoadPercent());
    // check that the load reported produces a nonzero delay
    long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats);
    assertNotEquals("Reported load does not produce a backoff", backoffTime, 0);
    LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + server + " is " + backoffTime);
    // Reach into the connection and submit work directly to AsyncProcess so we can
    // monitor how long the submission was delayed via a callback
    List<Row> ops = new ArrayList<>(1);
    ops.add(p);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicLong endTime = new AtomicLong();
    long startTime = EnvironmentEdgeManager.currentTime();
    BufferedMutatorImpl mutator = ((HTable) table).mutator;
    Batch.Callback<Result> callback = (byte[] r, byte[] row, Result result) -> {
        endTime.set(EnvironmentEdgeManager.currentTime());
        latch.countDown();
    };
    AsyncProcessTask<Result> task = AsyncProcessTask.newBuilder(callback)
        .setPool(mutator.getPool())
        .setTableName(tableName)
        .setRowAccess(ops)
        .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE)
        .setOperationTimeout(conn.getConnectionConfiguration().getOperationTimeout())
        .setRpcTimeout(60 * 1000)
        .build();
    mutator.getAsyncProcess().submit(task);
    // Currently the ExponentialClientBackoffPolicy under these test conditions
    // produces a backoffTime of 151 milliseconds. This is long enough so the
    // wait and related checks below are reasonable. Revisit if the backoff
    // time reported by above debug logging has significantly deviated.
    String name = server.getServerName() + "," + Bytes.toStringBinary(regionName);
    MetricsConnection.RegionStats rsStats = conn.getConnectionMetrics().serverStats.get(server).get(regionName);
    assertEquals(name, rsStats.name);
    assertEquals(rsStats.heapOccupancyHist.getSnapshot().getMean(), (double) regionStats.getHeapOccupancyPercent(), 0.1);
    assertEquals(rsStats.memstoreLoadHist.getSnapshot().getMean(), (double) regionStats.getMemstoreLoadPercent(), 0.1);
    MetricsConnection.RunnerStats runnerStats = conn.getConnectionMetrics().runnerStats;
    assertEquals(runnerStats.delayRunners.getCount(), 1);
    assertEquals(runnerStats.normalRunners.getCount(), 1);
    assertEquals("", runnerStats.delayIntevalHist.getSnapshot().getMean(), (double) backoffTime, 0.1);
    latch.await(backoffTime * 2, TimeUnit.MILLISECONDS);
    assertNotEquals("AsyncProcess did not submit the work time", endTime.get(), 0);
    assertTrue("AsyncProcess did not delay long enough", endTime.get() - startTime >= backoffTime);
}
Also used : ClientBackoffPolicy(org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy) ExponentialClientBackoffPolicy(org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy) Configuration(org.apache.hadoop.conf.Configuration) ServerStatistics(org.apache.hadoop.hbase.client.backoff.ServerStatistics) ArrayList(java.util.ArrayList) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) MemstoreSize(org.apache.hadoop.hbase.regionserver.MemstoreSize) CountDownLatch(java.util.concurrent.CountDownLatch) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) AtomicLong(java.util.concurrent.atomic.AtomicLong) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) Test(org.junit.Test)
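
For quick reference, the client-side pushback plumbing this test exercises reduces to a few calls. A minimal sketch, assuming a ClusterConnection already configured with ExponentialClientBackoffPolicy and server-statistics tracking as in the test above; the names conn, server, and regionName follow the test:

// A minimal sketch using only calls exercised in the test above.
ClientBackoffPolicy policy = conn.getBackoffPolicy();
ServerStatisticTracker tracker = conn.getStatisticsTracker();
// Statistics are keyed by server; per-region load lives inside ServerStatistics.
ServerStatistics serverStats = tracker.getServerStatsForTesting(server);
// The policy converts the reported memstore/heap load into a delay in
// milliseconds; with ExponentialClientBackoffPolicy, heavier load backs off longer.
long backoffMs = policy.getBackoffTime(server, regionName, serverStats);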

Example 2 with MemstoreSize

Use of org.apache.hadoop.hbase.regionserver.MemstoreSize in project hbase by apache.

From the class AbstractTestWALReplay, method testReplayEditsWrittenViaHRegion.

/**
   * Test writing edits into an HRegion, closing it, splitting logs, opening
   * Region again.  Verify seqids.
   * @throws IOException
   * @throws IllegalAccessException
   * @throws NoSuchFieldException
   * @throws IllegalArgumentException
   * @throws SecurityException
   */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families. Flush one of the
    // families during the load of edits so its seqid differs from the others,
    // to verify we do the right thing when seqids differ.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // Flush after the first family so at least one family has a seqid different from the rest.
            region.flush(true);
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of the log has the correct effect: our seqids are calculated correctly, so
    // all edits in the logs are seen as 'stale'/old.
    region.close(true);
    wal.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());
    // Now add more edits, then 'crash' this region by splitting its log out
    // from under it, and assert that replay adds the edits back correctly
    // when the region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            WAL wal3 = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {

                @Override
                protected void restoreEdit(HStore s, Cell cell, MemstoreSize memstoreSize) {
                    super.restoreEdit(s, cell, memstoreSize);
                    countOfRestoredEdits.incrementAndGet();
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());
            // I can't close the original wal. It's been appropriated when we split.
            region3.close();
            wal3.close();
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) User(org.apache.hadoop.hbase.security.User) Configuration(org.apache.hadoop.conf.Configuration) MemstoreSize(org.apache.hadoop.hbase.regionserver.MemstoreSize) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Get(org.apache.hadoop.hbase.client.Get) FileSystem(org.apache.hadoop.fs.FileSystem) HStore(org.apache.hadoop.hbase.regionserver.HStore) Test(org.junit.Test)
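
The hook Example 2 relies on is worth isolating: restoreEdit is invoked once per cell replayed from the WAL, so a subclass can count replayed edits without touching the replay logic itself. A minimal sketch, reusing the constructor arguments and the override shown in the test above:

// A minimal sketch of the observation hook from Example 2.
final AtomicInteger replayed = new AtomicInteger(0);
HRegion observed = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
    @Override
    protected void restoreEdit(HStore s, Cell cell, MemstoreSize memstoreSize) {
        // Keep the normal replay behavior, then count the restored cell.
        super.restoreEdit(s, cell, memstoreSize);
        replayed.incrementAndGet();
    }
};
// initialize() runs the WAL replay; afterwards replayed.get() is the number
// of edits restored into the memstore.
long openSeqId = observed.initialize();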

Aggregations

Configuration (org.apache.hadoop.conf.Configuration) 2
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 2
MemstoreSize (org.apache.hadoop.hbase.regionserver.MemstoreSize) 2
Test (org.junit.Test) 2
IOException (java.io.IOException) 1
ArrayList (java.util.ArrayList) 1
CountDownLatch (java.util.concurrent.CountDownLatch) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
AtomicLong (java.util.concurrent.atomic.AtomicLong) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
Path (org.apache.hadoop.fs.Path) 1
Get (org.apache.hadoop.hbase.client.Get) 1
Result (org.apache.hadoop.hbase.client.Result) 1
ClientBackoffPolicy (org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy) 1
ExponentialClientBackoffPolicy (org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy) 1
ServerStatistics (org.apache.hadoop.hbase.client.backoff.ServerStatistics) 1
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch) 1
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer) 1
HStore (org.apache.hadoop.hbase.regionserver.HStore) 1
Region (org.apache.hadoop.hbase.regionserver.Region) 1