Example 76 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestMobCloneSnapshotFromClient, method createMobTable.

private void createMobTable(final HBaseTestingUtility util, final TableName tableName,
        final byte[][] splitKeys, int regionReplication, final byte[]... families)
        throws IOException, InterruptedException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setRegionReplication(regionReplication);
    // DelayFlushCoprocessor is defined by the test to slow down region flushes.
    htd.addCoprocessor(DelayFlushCoprocessor.class.getName());
    for (byte[] family : families) {
        HColumnDescriptor hcd = new HColumnDescriptor(family);
        // MOB enabled with a zero threshold: every cell value goes to MOB storage.
        hcd.setMobEnabled(true);
        hcd.setMobThreshold(0L);
        htd.addFamily(hcd);
    }
    util.getAdmin().createTable(htd, splitKeys);
    SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
    // splitKeys.length + 1 key ranges, each replicated regionReplication times.
    assertEquals((splitKeys.length + 1) * regionReplication,
        util.getAdmin().getTableRegions(tableName).size());
}
Also used: HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
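
A hypothetical invocation of the helper (the split keys, replication factor, and family name below are illustrative, not taken from the real test):

byte[][] splits = new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("m") };
// 3 key ranges x 2 replicas: getTableRegions() should report 6 regions.
createMobTable(TEST_UTIL, TableName.valueOf("testMobCloneTable"), splits, 2, Bytes.toBytes("f1"));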

Example 77 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestFromClientSide, method testScannerFailsAfterRetriesWhenCoprocessorThrowsIOE.

/**
   * Tests the case where a coprocessor throws a regular IOException in the scan. The expectation
   * is that the client will keep retrying, but fail once the retries are exhausted instead of
   * retrying indefinitely.
   */
@Test(timeout = 180000)
public void testScannerFailsAfterRetriesWhenCoprocessorThrowsIOE() throws IOException, InterruptedException {
    TEST_UTIL.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
    HTableDescriptor htd = TEST_UTIL.createTableDescriptor(tableName, FAMILY);
    htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
    TEST_UTIL.getAdmin().createTable(htd);
    ExceptionInReseekRegionObserver.reset();
    // throw exceptions in every retry
    ExceptionInReseekRegionObserver.throwOnce.set(false);
    try (Table t = TEST_UTIL.getConnection().getTable(tableName)) {
        TEST_UTIL.loadTable(t, FAMILY, false);
        TEST_UTIL.getAdmin().flush(tableName);
        TEST_UTIL.countRows(t, new Scan().addColumn(FAMILY, FAMILY));
        fail("Should have thrown an exception");
    } catch (DoNotRetryIOException expected) {
        // Once the retries are exhausted, the failure surfaces as a
        // ScannerResetException, which is a DoNotRetryIOException.
        assertTrue(expected instanceof ScannerResetException);
    }
    assertTrue(ExceptionInReseekRegionObserver.reqCount.get() >= 3);
}
Also used: TableName (org.apache.hadoop.hbase.TableName), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), ScannerResetException (org.apache.hadoop.hbase.exceptions.ScannerResetException), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Test (org.junit.Test)
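
The test's ExceptionInReseekRegionObserver hooks the region scanner's seek/reseek path; a minimal sketch in the same spirit, assuming the era's BaseRegionObserver API and injecting the failure from preScannerNext instead, might look like the following (reqCount and throwOnce mirror the flags used above):

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

public class ThrowingScanObserver extends BaseRegionObserver {
    static final AtomicInteger reqCount = new AtomicInteger();
    // true: fail only the first scan RPC; false: fail every one of them.
    static final AtomicBoolean throwOnce = new AtomicBoolean(true);

    static void reset() {
        reqCount.set(0);
    }

    @Override
    public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
            InternalScanner s, List<Result> results, int limit, boolean hasMore)
            throws IOException {
        if (reqCount.incrementAndGet() == 1 || !throwOnce.get()) {
            throw new IOException("injected scan failure, request #" + reqCount.get());
        }
        return hasMore;
    }
}

With throwOnce set to false, every retry fails until HBASE_CLIENT_RETRIES_NUMBER is exhausted, which is what the assertion on reqCount at the end of the test checks.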

Example 78 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestFromClientSide, method testClientScannerIsResetWhenScanThrowsIOException.

/**
   * Tests the case where a scan throws an IOException in the middle of a seek / reseek,
   * leaving the server-side RegionScanner in a dirty state. The client has to ensure that
   * the ClientScanner does not surface the exception and still sees all the data.
   */
@Test
public void testClientScannerIsResetWhenScanThrowsIOException() throws IOException, InterruptedException {
    TEST_UTIL.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor htd = TEST_UTIL.createTableDescriptor(tableName, FAMILY);
    htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
    TEST_UTIL.getAdmin().createTable(htd);
    ExceptionInReseekRegionObserver.reset();
    // throw exceptions only once
    ExceptionInReseekRegionObserver.throwOnce.set(true);
    try (Table t = TEST_UTIL.getConnection().getTable(tableName)) {
        int rowCount = TEST_UTIL.loadTable(t, FAMILY, false);
        TEST_UTIL.getAdmin().flush(tableName);
        int actualRowCount = TEST_UTIL.countRows(t, new Scan().addColumn(FAMILY, FAMILY));
        assertEquals(rowCount, actualRowCount);
    }
    assertTrue(ExceptionInReseekRegionObserver.reqCount.get() > 0);
}
Also used: TableName (org.apache.hadoop.hbase.TableName), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Test (org.junit.Test)
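
Assuming TEST_UTIL.countRows simply iterates a scanner, the transparent recovery can be made explicit with a hand-rolled loop: the single injected IOException is absorbed by the ClientScanner, which resets and reopens the region scanner, so the loop still observes every row.

int rows = 0;
try (ResultScanner scanner = t.getScanner(new Scan().addColumn(FAMILY, FAMILY))) {
    for (Result r : scanner) {
        rows++;
    }
}
assertEquals(rowCount, rows);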

Example 79 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestFromClientSide, method testKeepDeletedCells.

/**
   * Basic client-side validation of HBASE-4536 (KEEP_DELETED_CELLS).
   */
@Test
public void testKeepDeletedCells() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] C0 = Bytes.toBytes("c0");
    final byte[] T1 = Bytes.toBytes("T1");
    final byte[] T2 = Bytes.toBytes("T2");
    final byte[] T3 = Bytes.toBytes("T3");
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
        .setKeepDeletedCells(KeepDeletedCells.TRUE)
        .setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE)
        .setMaxVersions(3);
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(hcd);
    TEST_UTIL.getAdmin().createTable(desc);
    Table h = TEST_UTIL.getConnection().getTable(tableName);
    long ts = System.currentTimeMillis();
    Put p = new Put(T1, ts);
    p.addColumn(FAMILY, C0, T1);
    h.put(p);
    p = new Put(T1, ts + 2);
    p.addColumn(FAMILY, C0, T2);
    h.put(p);
    p = new Put(T1, ts + 4);
    p.addColumn(FAMILY, C0, T3);
    h.put(p);
    Delete d = new Delete(T1, ts + 3);
    h.delete(d);
    d = new Delete(T1, ts + 3);
    d.addColumns(FAMILY, C0, ts + 3);
    h.delete(d);
    Get g = new Get(T1);
    // the time range [0, ts + 3) does *not* include the deletes at ts + 3
    g.setTimeRange(0, ts + 3);
    Result r = h.get(g);
    assertArrayEquals(T2, r.getValue(FAMILY, C0));
    Scan s = new Scan(T1);
    s.setTimeRange(0, ts + 3);
    s.setMaxVersions();
    ResultScanner scanner = h.getScanner(s);
    Cell[] kvs = scanner.next().rawCells();
    assertArrayEquals(T2, CellUtil.cloneValue(kvs[0]));
    assertArrayEquals(T1, CellUtil.cloneValue(kvs[1]));
    scanner.close();
    s = new Scan(T1);
    s.setRaw(true);
    s.setMaxVersions();
    scanner = h.getScanner(s);
    kvs = scanner.next().rawCells();
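    // Raw scan keeps the delete markers. Within the row, the delete-family marker
    // (empty qualifier) sorts ahead of the c0 cells; within c0, cells sort newest
    // first, and the column-delete marker orders before the put at its timestamp:
    //   [0] delete-family @ ts+3, [1] T3 @ ts+4, [2] delete-columns @ ts+3,
    //   [3] T2 @ ts+2, [4] T1 @ ts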
    assertTrue(CellUtil.isDeleteFamily(kvs[0]));
    assertArrayEquals(T3, CellUtil.cloneValue(kvs[1]));
    assertTrue(CellUtil.isDelete(kvs[2]));
    assertArrayEquals(T2, CellUtil.cloneValue(kvs[3]));
    assertArrayEquals(T1, CellUtil.cloneValue(kvs[4]));
    scanner.close();
    h.close();
}
Also used: HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), TableName (org.apache.hadoop.hbase.TableName), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
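
The point of the schema choice: with KEEP_DELETED_CELLS set to TRUE, deleted cells stay visible to time-range and raw queries (until they would anyway expire by TTL or version count), which is why the Get restricted to [0, ts + 3) still returns T2 even though a newer delete masks it.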

Example 80 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestHCM, method testPutOperationTimeout.

@Test
public void testPutOperationTimeout() throws Exception {
    HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName()));
    hdt.addCoprocessor(SleepAndFailFirstTime.class.getName());
    Table table = TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, TEST_UTIL.getConfiguration());
    table.setRpcTimeout(Integer.MAX_VALUE);
    SleepAndFailFirstTime.ct.set(0);
    // Check that it works if the timeout is big enough
    table.setOperationTimeout(120 * 1000);
    table.put(new Put(FAM_NAM).addColumn(FAM_NAM, FAM_NAM, FAM_NAM));
    // Resetting and retrying. Will fail this time, not enough time for the second try
    SleepAndFailFirstTime.ct.set(0);
    try {
        table.setOperationTimeout(30 * 1000);
        table.put(new Put(FAM_NAM).addColumn(FAM_NAM, FAM_NAM, FAM_NAM));
        Assert.fail("We expect an exception here");
    } catch (RetriesExhaustedWithDetailsException e) {
        // The client has a CallTimeout class, but it's not shared. We're not very clean
        // today; in the general case you can expect the call to stop, but the exception
        // may vary. In this test, however, we're sure that it will be a socket timeout.
        LOG.info("We received an exception, as expected", e);
    } catch (IOException e) {
        Assert.fail("Wrong exception:" + e.getMessage());
    } finally {
        table.close();
    }
}
Also used: IOException (java.io.IOException), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Test (org.junit.Test)
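
SleepAndFailFirstTime is a test-local coprocessor; a hypothetical sketch of its shape, assuming the era's BaseRegionObserver API and an illustrative sleep time (the real value is defined by the test), could be:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Threads;

public class SleepAndFailFirstTimeSketch extends BaseRegionObserver {
    static final AtomicLong ct = new AtomicLong(0);
    // Illustrative value: long enough that a 30s operation timeout cannot fit
    // the retry after the first failure, while a 120s timeout can.
    static final long SLEEP_TIME_MS = 20000;

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
            WALEdit edit, Durability durability) throws IOException {
        // Every put sleeps; only the very first one fails, so the outcome depends
        // on whether the operation timeout leaves room for the retry.
        Threads.sleep(SLEEP_TIME_MS);
        if (ct.incrementAndGet() == 1) {
            throw new IOException("failing the first call on purpose");
        }
    }
}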

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 867
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 555
Test (org.junit.Test): 425
TableName (org.apache.hadoop.hbase.TableName): 258
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 171
IOException (java.io.IOException): 167
Put (org.apache.hadoop.hbase.client.Put): 149
Table (org.apache.hadoop.hbase.client.Table): 134
Path (org.apache.hadoop.fs.Path): 127
Admin (org.apache.hadoop.hbase.client.Admin): 121
Configuration (org.apache.hadoop.conf.Configuration): 87
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 62
Connection (org.apache.hadoop.hbase.client.Connection): 57
Scan (org.apache.hadoop.hbase.client.Scan): 51
Cell (org.apache.hadoop.hbase.Cell): 44
Delete (org.apache.hadoop.hbase.client.Delete): 44
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 43