
Example 16 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestHRegion, method testCellTTLs.

@Test
public void testCellTTLs() throws IOException {
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    final byte[] row = Bytes.toBytes("testRow");
    final byte[] q1 = Bytes.toBytes("q1");
    final byte[] q2 = Bytes.toBytes("q2");
    final byte[] q3 = Bytes.toBytes("q3");
    final byte[] q4 = Bytes.toBytes("q4");
    // 10 seconds
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(10).build()).build();
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
    region = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(), TEST_UTIL.getDataTestDir(), conf, tableDescriptor);
    assertNotNull(region);
    long now = EnvironmentEdgeManager.currentTime();
    // Add a cell that will expire in 5 seconds via cell TTL
    // TTL tags specify the TTL in milliseconds
    region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY,
        new ArrayBackedTag[] { new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    // Add a cell that will expire after 10 seconds via family setting
    region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
    // Add a cell that will expire in 15 seconds via cell TTL
    // TTL tags specify the TTL in milliseconds
    region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY,
        new ArrayBackedTag[] { new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    // Add a cell that will expire in 20 seconds via family setting
    region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
    // Flush so we are sure store scanning gets this right
    region.flush(true);
    // A query at time T+0 should return all cells
    Result r = region.get(new Get(row));
    assertNotNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Increment time to T+5 seconds
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Increment time to T+10 seconds
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Increment time to T+15 seconds
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Jump ahead 10 seconds, to T+25
    edge.incrementTime(10000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNull(r.getValue(fam1, q4));
    // Fun with disappearing increments
    // Start at 1
    region.put(new Put(row).addColumn(fam1, q1, Bytes.toBytes(1L)));
    r = region.get(new Get(row));
    byte[] val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(1L, Bytes.toLong(val));
    // Increment with a TTL of 5 seconds
    Increment incr = new Increment(row).addColumn(fam1, q1, 1L);
    incr.setTTL(5000);
    // Counter becomes 2
    region.increment(incr);
    // New value should be 2
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(2L, Bytes.toLong(val));
    // Increment time to T+30 seconds
    edge.incrementTime(5000);
    // Value should be back to 1
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(1L, Bytes.toLong(val));
    // Increment time to T+35 seconds
    edge.incrementTime(5000);
    // Original value written at T+25 should be gone now via family TTL
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
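
For contrast with the region-level test above, here is a minimal client-side sketch of the same per-mutation TTL idea. The connection setup, table name, and column names are illustrative assumptions, not taken from the test; imports are as in the "Also used" list plus Connection and ConnectionFactory from org.apache.hadoop.hbase.client:

Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("counters"))) {
    Increment incr = new Increment(Bytes.toBytes("row1"));
    incr.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
    // As in the test, the TTL is in milliseconds and applies only to the cell
    // this mutation writes; any column family TTL still applies independently.
    incr.setTTL(5000L);
    Result result = table.increment(incr);
    long value = Bytes.toLong(result.getValue(Bytes.toBytes("f"), Bytes.toBytes("q")));
    System.out.println("counter = " + value);
}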

Example 17 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestThriftHBaseServiceHandler, method testDurability.

/**
 * Create TPut, TDelete, and TIncrement objects, set their durability, then call the
 * ThriftUtilities functions to get Put, Delete, and Increment objects respectively. Use
 * getDurability to make sure the returned objects have the appropriate durability setting.
 */
@Test
public void testDurability() throws Exception {
    byte[] rowName = Bytes.toBytes("testDurability");
    List<TColumnValue> columnValues = new ArrayList<>(1);
    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
    List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
    incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
    TDelete tDelete = new TDelete(wrap(rowName));
    tDelete.setDurability(TDurability.SKIP_WAL);
    Delete delete = deleteFromThrift(tDelete);
    assertEquals(Durability.SKIP_WAL, delete.getDurability());
    tDelete.setDurability(TDurability.ASYNC_WAL);
    delete = deleteFromThrift(tDelete);
    assertEquals(Durability.ASYNC_WAL, delete.getDurability());
    tDelete.setDurability(TDurability.SYNC_WAL);
    delete = deleteFromThrift(tDelete);
    assertEquals(Durability.SYNC_WAL, delete.getDurability());
    tDelete.setDurability(TDurability.FSYNC_WAL);
    delete = deleteFromThrift(tDelete);
    assertEquals(Durability.FSYNC_WAL, delete.getDurability());
    TPut tPut = new TPut(wrap(rowName), columnValues);
    tPut.setDurability(TDurability.SKIP_WAL);
    Put put = putFromThrift(tPut);
    assertEquals(Durability.SKIP_WAL, put.getDurability());
    tPut.setDurability(TDurability.ASYNC_WAL);
    put = putFromThrift(tPut);
    assertEquals(Durability.ASYNC_WAL, put.getDurability());
    tPut.setDurability(TDurability.SYNC_WAL);
    put = putFromThrift(tPut);
    assertEquals(Durability.SYNC_WAL, put.getDurability());
    tPut.setDurability(TDurability.FSYNC_WAL);
    put = putFromThrift(tPut);
    assertEquals(Durability.FSYNC_WAL, put.getDurability());
    TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
    tIncrement.setDurability(TDurability.SKIP_WAL);
    Increment increment = incrementFromThrift(tIncrement);
    assertEquals(Durability.SKIP_WAL, increment.getDurability());
    tIncrement.setDurability(TDurability.ASYNC_WAL);
    increment = incrementFromThrift(tIncrement);
    assertEquals(Durability.ASYNC_WAL, increment.getDurability());
    tIncrement.setDurability(TDurability.SYNC_WAL);
    increment = incrementFromThrift(tIncrement);
    assertEquals(Durability.SYNC_WAL, increment.getDurability());
    tIncrement.setDurability(TDurability.FSYNC_WAL);
    increment = incrementFromThrift(tIncrement);
    assertEquals(Durability.FSYNC_WAL, increment.getDurability());
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) TDelete(org.apache.hadoop.hbase.thrift2.generated.TDelete) TColumnIncrement(org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement) TIncrement(org.apache.hadoop.hbase.thrift2.generated.TIncrement) Increment(org.apache.hadoop.hbase.client.Increment) ArrayList(java.util.ArrayList) TColumnValue(org.apache.hadoop.hbase.thrift2.generated.TColumnValue) TPut(org.apache.hadoop.hbase.thrift2.generated.TPut) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
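
The Thrift-to-client mapping above mirrors the plain client API one-to-one. A minimal sketch of setting durability directly on a client-side Increment, with illustrative row and column names:

Increment increment = new Increment(Bytes.toBytes("testDurability"));
increment.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qualifier"), 1L);
// Durability here is the same org.apache.hadoop.hbase.client.Durability enum
// that incrementFromThrift maps TDurability values onto.
increment.setDurability(Durability.ASYNC_WAL);
assertEquals(Durability.ASYNC_WAL, increment.getDurability());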

Example 18 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class SpaceQuotaHelperForTests, method verifyNoViolation.

/**
 * Verifies that no policy has been violated on the given table
 */
void verifyNoViolation(TableName tn, Mutation m) throws Exception {
    // Try a few times to write data before failing
    boolean sawSuccess = false;
    for (int i = 0; i < NUM_RETRIES && !sawSuccess; i++) {
        try (Table table = testUtil.getConnection().getTable(tn)) {
            if (m instanceof Put) {
                table.put((Put) m);
            } else if (m instanceof Delete) {
                table.delete((Delete) m);
            } else if (m instanceof Append) {
                table.append((Append) m);
            } else if (m instanceof Increment) {
                table.increment((Increment) m);
            } else {
                fail("Failed to apply " + m.getClass().getSimpleName() + " to the table." + " Programming error");
            }
            sawSuccess = true;
        } catch (Exception e) {
            LOG.info("Rejected the " + m.getClass().getSimpleName() + ", will sleep and retry");
            Thread.sleep(2000);
        }
    }
    if (!sawSuccess) {
        try (Table quotaTable = testUtil.getConnection().getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
            try (ResultScanner scanner = quotaTable.getScanner(new Scan())) {
                Result result = null;
                LOG.info("Dumping contents of hbase:quota table");
                while ((result = scanner.next()) != null) {
                    LOG.info(Bytes.toString(result.getRow()) + " => " + result.toString());
                }
            }
        }
    }
    assertTrue("Expected to succeed in writing data to a table not having quota ", sawSuccess);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) Append(org.apache.hadoop.hbase.client.Append) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Increment(org.apache.hadoop.hbase.client.Increment) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result)
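
A hypothetical call site for the helper, assuming an instance named helper and a table tn that has no space quota configured:

TableName tn = TableName.valueOf("no_quota_table");
Increment incr = new Increment(Bytes.toBytes("row"));
incr.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
// Expected to succeed within NUM_RETRIES attempts, since no quota applies.
helper.verifyNoViolation(tn, incr);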

Example 19 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class SpaceQuotaHelperForTests, method verifyViolation.

/**
 * Verifies that the given policy on the given table has been violated
 */
void verifyViolation(SpaceViolationPolicy policyToViolate, TableName tn, Mutation m) throws Exception {
    // Try a few times to get the expected exception before failing
    boolean sawError = false;
    String msg = "";
    for (int i = 0; i < NUM_RETRIES && !sawError; i++) {
        try (Table table = testUtil.getConnection().getTable(tn)) {
            if (m instanceof Put) {
                table.put((Put) m);
            } else if (m instanceof Delete) {
                table.delete((Delete) m);
            } else if (m instanceof Append) {
                table.append((Append) m);
            } else if (m instanceof Increment) {
                table.increment((Increment) m);
            } else {
                fail("Failed to apply " + m.getClass().getSimpleName() + " to the table. Programming error");
            }
            LOG.info("Did not reject the " + m.getClass().getSimpleName() + ", will sleep and retry");
            Thread.sleep(2000);
        } catch (Exception e) {
            msg = StringUtils.stringifyException(e);
            if ((policyToViolate.equals(SpaceViolationPolicy.DISABLE) && e instanceof TableNotEnabledException) || msg.contains(policyToViolate.name())) {
                LOG.info("Got the expected exception={}", msg);
                sawError = true;
                break;
            } else {
                LOG.warn("Did not get the expected exception, will sleep and retry", e);
                Thread.sleep(2000);
            }
        }
    }
    if (!sawError) {
        try (Table quotaTable = testUtil.getConnection().getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
            try (ResultScanner scanner = quotaTable.getScanner(new Scan())) {
                Result result = null;
                LOG.info("Dumping contents of hbase:quota table");
                while ((result = scanner.next()) != null) {
                    LOG.info(Bytes.toString(result.getRow()) + " => " + result.toString());
                }
            }
        }
    } else {
        if (policyToViolate.equals(SpaceViolationPolicy.DISABLE)) {
            assertTrue(msg.contains("TableNotEnabledException") || msg.contains(policyToViolate.name()));
        } else {
            assertTrue("Expected exception message to contain the word '" + policyToViolate.name() + "', but was " + msg, msg.contains(policyToViolate.name()));
        }
    }
    assertTrue("Expected to see an exception writing data to a table exceeding its quota", sawError);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Put(org.apache.hadoop.hbase.client.Put) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Scan(org.apache.hadoop.hbase.client.Scan)
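
The mirror-image call, again with an assumed helper instance, for a table tn whose NO_WRITES space quota is already in violation:

Put p = new Put(Bytes.toBytes("row"));
p.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
// Every attempt should be rejected with an exception naming NO_WRITES.
helper.verifyViolation(SpaceViolationPolicy.NO_WRITES, tn, p);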

Example 20 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestTags, method testTagsWithAppendAndIncrement.

@Test
public void testTagsWithAppendAndIncrement() throws Exception {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] f = Bytes.toBytes("f");
    byte[] q = Bytes.toBytes("q");
    byte[] row1 = Bytes.toBytes("r1");
    byte[] row2 = Bytes.toBytes("r2");
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(f)).build();
    TEST_UTIL.getAdmin().createTable(tableDescriptor);
    Table table = null;
    try {
        table = TEST_UTIL.getConnection().getTable(tableName);
        Put put = new Put(row1);
        byte[] v = Bytes.toBytes(2L);
        put.addColumn(f, q, v);
        put.setAttribute("visibility", Bytes.toBytes("tag1"));
        table.put(put);
        Increment increment = new Increment(row1);
        increment.addColumn(f, q, 1L);
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        ResultScanner scanner = table.getScanner(new Scan());
        Result result = scanner.next();
        KeyValue kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        List<Tag> tags = TestCoprocessorForTags.tags;
        assertEquals(3L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(1, tags.size());
        assertEquals("tag1", Bytes.toString(Tag.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        increment = new Increment(row1);
        increment.add(new KeyValue(row1, f, q, 1234L, v));
        increment.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan());
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(5L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(2, tags.size());
        // We cannot assume the ordering of tags
        List<String> tagValues = new ArrayList<>();
        for (Tag tag : tags) {
            tagValues.add(Bytes.toString(Tag.cloneValue(tag)));
        }
        assertTrue(tagValues.contains("tag1"));
        assertTrue(tagValues.contains("tag2"));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        put = new Put(row2);
        v = Bytes.toBytes(2L);
        put.addColumn(f, q, v);
        table.put(put);
        increment = new Increment(row2);
        increment.add(new KeyValue(row2, f, q, 1234L, v));
        increment.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().withStartRow(row2));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(4L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(1, tags.size());
        assertEquals("tag2", Bytes.toString(Tag.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        // Test Append
        byte[] row3 = Bytes.toBytes("r3");
        put = new Put(row3);
        put.addColumn(f, q, Bytes.toBytes("a"));
        put.setAttribute("visibility", Bytes.toBytes("tag1"));
        table.put(put);
        Append append = new Append(row3);
        append.addColumn(f, q, Bytes.toBytes("b"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().withStartRow(row3));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(1, tags.size());
        assertEquals("tag1", Bytes.toString(Tag.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        append = new Append(row3);
        append.add(new KeyValue(row3, f, q, 1234L, v));
        append.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().withStartRow(row3));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(2, tags.size());
        // We cannot assume the ordering of tags
        tagValues.clear();
        for (Tag tag : tags) {
            tagValues.add(Bytes.toString(Tag.cloneValue(tag)));
        }
        assertTrue(tagValues.contains("tag1"));
        assertTrue(tagValues.contains("tag2"));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        byte[] row4 = Bytes.toBytes("r4");
        put = new Put(row4);
        put.addColumn(f, q, Bytes.toBytes("a"));
        table.put(put);
        append = new Append(row4);
        append.add(new KeyValue(row4, f, q, 1234L, v));
        append.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().withStartRow(row4));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(1, tags.size());
        assertEquals("tag2", Bytes.toString(Tag.cloneValue(tags.get(0))));
    } finally {
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        if (table != null) {
            table.close();
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayList(java.util.ArrayList) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Scan(org.apache.hadoop.hbase.client.Scan) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) Tag(org.apache.hadoop.hbase.Tag) Test(org.junit.Test)
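
The test needs the TestCoprocessorForTags coprocessor to surface tags because HBase strips tags from results returned to ordinary clients. As a sketch of what such server-side code does, assuming a Cell named cell observed inside a coprocessor hook:

// Tags are only visible server-side; the RawCell interface exposes them.
// The cast is an assumption that holds for cells handed to coprocessors.
Iterator<Tag> tagIterator = ((RawCell) cell).getTags();
while (tagIterator.hasNext()) {
    Tag tag = tagIterator.next();
    byte type = tag.getType();
    String value = Bytes.toString(Tag.cloneValue(tag));
    System.out.println(type + " => " + value);
}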

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment): 81
Test (org.junit.Test): 42
Put (org.apache.hadoop.hbase.client.Put): 31
Append (org.apache.hadoop.hbase.client.Append): 25
Result (org.apache.hadoop.hbase.client.Result): 25
Delete (org.apache.hadoop.hbase.client.Delete): 21
Get (org.apache.hadoop.hbase.client.Get): 19
IOException (java.io.IOException): 16
TableName (org.apache.hadoop.hbase.TableName): 15
Table (org.apache.hadoop.hbase.client.Table): 15
ArrayList (java.util.ArrayList): 14
Cell (org.apache.hadoop.hbase.Cell): 11
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 11
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 9
Mutation (org.apache.hadoop.hbase.client.Mutation): 9
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 9
List (java.util.List): 8
Map (java.util.Map): 8
Scan (org.apache.hadoop.hbase.client.Scan): 7
KeyValue (org.apache.hadoop.hbase.KeyValue): 5