Search in sources:

Example 1 with BaseSource

use of org.apache.hadoop.hbase.metrics.BaseSource in project hbase by apache.

The following shows the class TestMultiRespectsLimits, method testMultiLimits.

@Test
public void testMultiLimits() throws Exception {
    // Loads a table, splits it so a multi-get spans more than one region, then
    // issues a batch of Gets large enough that the server must chunk the
    // response, and checks the exception counters moved accordingly.
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final Table table = TEST_UTIL.createTable(tableName, FAMILY);
    TEST_UTIL.loadTable(table, FAMILY, false);
    // Split the table to make sure that the chunking happens across regions.
    try (final Admin admin = TEST_UTIL.getAdmin()) {
        admin.split(tableName);
        TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                // Done once the split has actually produced a second region.
                return admin.getTableRegions(tableName).size() > 1;
            }
        });
    }
    // One Get per pre-generated row, MAX_SIZE of them in total.
    final List<Get> batch = new ArrayList<>(MAX_SIZE);
    for (int row = 0; row < MAX_SIZE; row++) {
        batch.add(new Get(HBaseTestingUtility.ROWS[row]));
    }
    final RpcServerInterface rpcServer =
        TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer();
    final BaseSource metricsSource = rpcServer.getMetrics().getMetricsSource();
    // Snapshot the counters before the batch so the assertions are deltas.
    final long exceptionsBefore = METRICS_ASSERT.getCounter("exceptions", metricsSource);
    final long multiExceptionsBefore =
        METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", metricsSource);
    final Result[] fetched = table.get(batch);
    assertEquals(MAX_SIZE, fetched.length);
    // Cells from TEST_UTIL.loadTable have a length of 27.
    // Multiplying by less than that gives an easy lower bound on size.
    // However in reality each kv is being reported as much higher than that.
    final long expectedGrowth = (MAX_SIZE * 25) / MAX_SIZE;
    METRICS_ASSERT.assertCounterGt("exceptions", exceptionsBefore + expectedGrowth, metricsSource);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
        multiExceptionsBefore + expectedGrowth, metricsSource);
}
Also used : BaseSource(org.apache.hadoop.hbase.metrics.BaseSource) ArrayList(java.util.ArrayList) TableName(org.apache.hadoop.hbase.TableName) RpcServerInterface(org.apache.hadoop.hbase.ipc.RpcServerInterface) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)

Example 2 with BaseSource

use of org.apache.hadoop.hbase.metrics.BaseSource in project hbase by apache.

The following shows the class TestMultiRespectsLimits, method testBlockMultiLimits.

@Test
public void testBlockMultiLimits() throws Exception {
    // Writes six large cells into one encoded block, flushes, then fetches two
    // non-adjacent columns so the referenced block exceeds MAX_SIZE even though
    // each individual result fits; the server should chunk and bump counters.
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final HTableDescriptor tableDesc = new HTableDescriptor(tableName);
    final HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
    familyDesc.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    tableDesc.addFamily(familyDesc);
    TEST_UTIL.getAdmin().createTable(tableDesc);
    final Table table = TEST_UTIL.getConnection().getTable(tableName);
    final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
    final RpcServerInterface rpcServer = regionServer.getRpcServer();
    final BaseSource metricsSource = rpcServer.getMetrics().getMetricsSource();
    // Snapshot the counters up front so the final assertions are deltas.
    final long exceptionsBefore = METRICS_ASSERT.getCounter("exceptions", metricsSource);
    final long multiExceptionsBefore =
        METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", metricsSource);
    final byte[] row = Bytes.toBytes("TEST");
    final byte[][] cols = new byte[][] { // Get this
    Bytes.toBytes("0"), // Buffer
    Bytes.toBytes("1"), // Buffer
    Bytes.toBytes("2"), // Get This
    Bytes.toBytes("3"), // Buffer
    Bytes.toBytes("4"), // Buffer
    Bytes.toBytes("5") };
    // Set the value size so that one result will be less than the MAX_SIZE
    // however the block being referenced will be larger than MAX_SIZE.
    // This should cause the regionserver to try and send a result immediately.
    final byte[] cellValue = new byte[MAX_SIZE - 100];
    ThreadLocalRandom.current().nextBytes(cellValue);
    for (byte[] qualifier : cols) {
        final Put put = new Put(row);
        put.addImmutable(FAMILY, qualifier, cellValue);
        table.put(put);
    }
    // Make sure that a flush happens
    try (final Admin admin = TEST_UTIL.getAdmin()) {
        admin.flush(tableName);
        TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                // All six puts are persisted once the flushed seqId passes 3.
                return regionServer.getOnlineRegions(tableName).get(0).getMaxFlushedSeqId() > 3;
            }
        });
    }
    // Request only columns 0 and 3 — separated so they land in different
    // chunks of the oversized block.
    final List<Get> gets = new ArrayList<>(2);
    final Get firstGet = new Get(row);
    firstGet.addColumn(FAMILY, cols[0]);
    gets.add(firstGet);
    final Get secondGet = new Get(row);
    secondGet.addColumn(FAMILY, cols[3]);
    gets.add(secondGet);
    final Result[] fetched = table.get(gets);
    assertEquals(2, fetched.length);
    METRICS_ASSERT.assertCounterGt("exceptions", exceptionsBefore, metricsSource);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", multiExceptionsBefore,
        metricsSource);
}
Also used : BaseSource(org.apache.hadoop.hbase.metrics.BaseSource) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) TableName(org.apache.hadoop.hbase.TableName) RpcServerInterface(org.apache.hadoop.hbase.ipc.RpcServerInterface) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)

Aggregations

ArrayList (java.util.ArrayList)2 TableName (org.apache.hadoop.hbase.TableName)2 Waiter (org.apache.hadoop.hbase.Waiter)2 RpcServerInterface (org.apache.hadoop.hbase.ipc.RpcServerInterface)2 BaseSource (org.apache.hadoop.hbase.metrics.BaseSource)2 Test (org.junit.Test)2 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)1 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)1 HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer)1