Search in sources :

Example 46 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

In class TestTags, method testTagsWithAppendAndIncrement:

/**
 * Verifies that cell tags set via the "visibility" attribute survive Increment and
 * Append operations, and that tags carried by the mutation itself are merged with
 * any tags already present on the cell.
 *
 * <p>Fix over the previous version: every {@link ResultScanner} is now closed via
 * try-with-resources; the original opened five scanners and leaked them all.
 */
@Test
public void testTagsWithAppendAndIncrement() throws Exception {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] f = Bytes.toBytes("f");
    byte[] q = Bytes.toBytes("q");
    byte[] row1 = Bytes.toBytes("r1");
    byte[] row2 = Bytes.toBytes("r2");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(f);
    desc.addFamily(colDesc);
    TEST_UTIL.getAdmin().createTable(desc);
    Table table = null;
    try {
        table = TEST_UTIL.getConnection().getTable(tableName);
        // Put with a tag, then a plain Increment: the existing tag must be preserved.
        Put put = new Put(row1);
        byte[] v = Bytes.toBytes(2L);
        put.addColumn(f, q, v);
        put.setAttribute("visibility", Bytes.toBytes("tag1"));
        table.put(put);
        Increment increment = new Increment(row1);
        increment.addColumn(f, q, 1L);
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        KeyValue kv;
        try (ResultScanner scanner = table.getScanner(new Scan())) {
            Result result = scanner.next();
            kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        }
        List<Tag> tags = TestCoprocessorForTags.tags;
        assertEquals(3L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(1, tags.size());
        assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        // Increment that itself carries a tag: old and new tags should both be present.
        increment = new Increment(row1);
        increment.add(new KeyValue(row1, f, q, 1234L, v));
        increment.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        try (ResultScanner scanner = table.getScanner(new Scan())) {
            Result result = scanner.next();
            kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        }
        tags = TestCoprocessorForTags.tags;
        assertEquals(5L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(2, tags.size());
        // We cannot assume the ordering of tags
        List<String> tagValues = new ArrayList<>();
        for (Tag tag : tags) {
            tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
        }
        assertTrue(tagValues.contains("tag1"));
        assertTrue(tagValues.contains("tag2"));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        // Increment with a tag on a cell that previously had none: only the new tag remains.
        put = new Put(row2);
        v = Bytes.toBytes(2L);
        put.addColumn(f, q, v);
        table.put(put);
        increment = new Increment(row2);
        increment.add(new KeyValue(row2, f, q, 1234L, v));
        increment.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        try (ResultScanner scanner = table.getScanner(new Scan().setStartRow(row2))) {
            Result result = scanner.next();
            kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        }
        tags = TestCoprocessorForTags.tags;
        assertEquals(4L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(1, tags.size());
        assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        // Test Append
        byte[] row3 = Bytes.toBytes("r3");
        put = new Put(row3);
        put.addColumn(f, q, Bytes.toBytes("a"));
        put.setAttribute("visibility", Bytes.toBytes("tag1"));
        table.put(put);
        Append append = new Append(row3);
        append.add(f, q, Bytes.toBytes("b"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        try (ResultScanner scanner = table.getScanner(new Scan().setStartRow(row3))) {
            Result result = scanner.next();
            kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        }
        tags = TestCoprocessorForTags.tags;
        assertEquals(1, tags.size());
        assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        // Append carrying its own tag: both tags should be present afterwards.
        append = new Append(row3);
        append.add(new KeyValue(row3, f, q, 1234L, v));
        append.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        try (ResultScanner scanner = table.getScanner(new Scan().setStartRow(row3))) {
            Result result = scanner.next();
            kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        }
        tags = TestCoprocessorForTags.tags;
        assertEquals(2, tags.size());
        // We cannot assume the ordering of tags
        tagValues.clear();
        for (Tag tag : tags) {
            tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
        }
        assertTrue(tagValues.contains("tag1"));
        assertTrue(tagValues.contains("tag2"));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        // Append with a tag on a previously untagged cell: only the new tag remains.
        byte[] row4 = Bytes.toBytes("r4");
        put = new Put(row4);
        put.addColumn(f, q, Bytes.toBytes("a"));
        table.put(put);
        append = new Append(row4);
        append.add(new KeyValue(row4, f, q, 1234L, v));
        append.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        try (ResultScanner scanner = table.getScanner(new Scan().setStartRow(row4))) {
            Result result = scanner.next();
            kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        }
        tags = TestCoprocessorForTags.tags;
        assertEquals(1, tags.size());
        assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
    } finally {
        // Always reset the coprocessor's static state so later tests start clean.
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        if (table != null) {
            table.close();
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) KeyValue(org.apache.hadoop.hbase.KeyValue) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Scan(org.apache.hadoop.hbase.client.Scan) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) Tag(org.apache.hadoop.hbase.Tag) Test(org.junit.Test)

Example 47 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

In class TestTags, method testFlushAndCompactionWithoutTags:

/**
 * Verifies that cells written without tags still have zero tags length after both
 * flush (to HFiles) and a full compaction, with PREFIX_TREE block encoding enabled.
 *
 * <p>Fix over the previous version: the identical scan-and-verify code appeared
 * twice with dead {@code scanner != null} checks ({@code getScanner} never returns
 * null); it is now a single helper using try-with-resources.
 */
@Test
public void testFlushAndCompactionWithoutTags() throws Exception {
    Table table = null;
    try {
        TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
        byte[] fam = Bytes.toBytes("info");
        byte[] row = Bytes.toBytes("rowa");
        // column names
        byte[] qual = Bytes.toBytes("qual");
        byte[] row1 = Bytes.toBytes("rowb");
        byte[] row2 = Bytes.toBytes("rowc");
        HTableDescriptor desc = new HTableDescriptor(tableName);
        HColumnDescriptor colDesc = new HColumnDescriptor(fam);
        colDesc.setBlockCacheEnabled(true);
        // Exercise the encoded-block read path as well as the plain one.
        colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
        desc.addFamily(colDesc);
        Admin admin = TEST_UTIL.getAdmin();
        admin.createTable(desc);
        table = TEST_UTIL.getConnection().getTable(tableName);
        Put put = new Put(row);
        byte[] value = Bytes.toBytes("value");
        put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
        table.put(put);
        admin.flush(tableName);
        // We are lacking an API for confirming flush request compaction.
        // Just sleep for a short time. We won't be able to confirm flush
        // completion but the test won't hang now or in the future if
        // default compaction policy causes compaction between flush and
        // when we go to confirm it.
        Thread.sleep(1000);
        Put put1 = new Put(row1);
        byte[] value1 = Bytes.toBytes("1000dfsdf");
        put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
        table.put(put1);
        admin.flush(tableName);
        Thread.sleep(1000);
        Put put2 = new Put(row2);
        byte[] value2 = Bytes.toBytes("1000dfsdf");
        put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
        table.put(put2);
        admin.flush(tableName);
        Thread.sleep(1000);
        // After three flushes, no cell should carry tags.
        assertNoTagsFromRow(table, row);
        admin.compact(tableName);
        while (admin.getCompactionState(tableName) != CompactionState.NONE) {
            Thread.sleep(10);
        }
        // After compaction rewrites the store files, still no tags.
        assertNoTagsFromRow(table, row);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}

/**
 * Scans up to three rows starting at {@code startRow} and asserts that the first
 * cell of every returned row has a tags length of zero.
 */
private static void assertNoTagsFromRow(Table table, byte[] startRow) throws Exception {
    try (ResultScanner scanner = table.getScanner(new Scan(startRow))) {
        Result[] next = scanner.next(3);
        for (Result result : next) {
            CellScanner cellScanner = result.cellScanner();
            cellScanner.advance();
            Cell current = cellScanner.current();
            assertEquals(0, current.getTagsLength());
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Admin(org.apache.hadoop.hbase.client.Admin) CellScanner(org.apache.hadoop.hbase.CellScanner) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)

Example 48 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

In class TestStoreFile, method testEmptyStoreFileRestrictKeyRanges:

/**
 * A scanner over an empty store file must report itself unusable for a scan whose
 * column-family time range ([0, 1)) the file cannot possibly satisfy.
 */
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
    // Mock the reader/store plumbing; only the family name matters here.
    StoreFileReader mockReader = mock(StoreFileReader.class);
    Store mockStore = mock(Store.class);
    HColumnDescriptor familyDescriptor = mock(HColumnDescriptor.class);
    byte[] family = Bytes.toBytes("ty");
    when(familyDescriptor.getName()).thenReturn(family);
    when(mockStore.getFamily()).thenReturn(familyDescriptor);
    StoreFileScanner fileScanner =
        new StoreFileScanner(mockReader, mock(HFileScanner.class), false, false, 0, 0, true);
    Scan restrictedScan = new Scan();
    restrictedScan.setColumnFamilyTimeRange(family, 0, 1);
    assertFalse(fileScanner.shouldUseScanner(restrictedScan, mockStore, 0));
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HFileScanner(org.apache.hadoop.hbase.io.hfile.HFileScanner) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)

Example 49 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

In class TestStoreScanner, method testDeleteVersionSameTimestamp:

/**
 * A Delete at the same timestamp as a Put must mask that version: the scan over
 * row R1 should produce no cells at all.
 */
@Test
public void testDeleteVersionSameTimestamp() throws IOException {
    KeyValue[] fixture = new KeyValue[] {
        KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"),
        KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care")
    };
    List<KeyValueScanner> fixtureScanners = scanFixture(fixture);
    Scan spec = new Scan(Bytes.toBytes("R1"));
    try (StoreScanner storeScanner =
        new StoreScanner(spec, scanInfo, scanType, getCols("a"), fixtureScanners)) {
        List<Cell> results = new ArrayList<>();
        // next() returns false (no more rows) and yields no cells.
        Assert.assertFalse(storeScanner.next(results));
        Assert.assertEquals(0, results.size());
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)

Example 50 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

In class TestStoreScanner, method SKIP_testPeek:

/**
 * TODO this fails, since we don't handle deletions, etc, in peek
 * (intentionally not annotated with @Test; the SKIP_ prefix keeps it disabled).
 */
public void SKIP_testPeek() throws Exception {
    KeyValue[] fixture = new KeyValue[] {
        KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"),
        KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care")
    };
    List<KeyValueScanner> fixtureScanners = scanFixture(fixture);
    Scan spec = new Scan(Bytes.toBytes("R1"));
    try (StoreScanner storeScanner =
        new StoreScanner(spec, scanInfo, scanType, getCols("a"), fixtureScanners)) {
        // The Delete fully masks the Put, so peek should see nothing.
        Assert.assertNull(storeScanner.peek());
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Scan(org.apache.hadoop.hbase.client.Scan)

Aggregations

Scan (org.apache.hadoop.hbase.client.Scan)950 Test (org.junit.Test)495 ResultScanner (org.apache.hadoop.hbase.client.ResultScanner)302 Result (org.apache.hadoop.hbase.client.Result)286 Cell (org.apache.hadoop.hbase.Cell)258 ArrayList (java.util.ArrayList)238 Table (org.apache.hadoop.hbase.client.Table)178 Put (org.apache.hadoop.hbase.client.Put)161 BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest)153 IOException (java.io.IOException)135 TableName (org.apache.hadoop.hbase.TableName)98 Delete (org.apache.hadoop.hbase.client.Delete)95 Filter (org.apache.hadoop.hbase.filter.Filter)95 KeyValue (org.apache.hadoop.hbase.KeyValue)84 Connection (org.apache.hadoop.hbase.client.Connection)81 SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter)78 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)78 RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter)72 Configuration (org.apache.hadoop.conf.Configuration)51 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)51