Example 6 with SingleColumnValueFilter

use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by apache.

the class TestHRegion method buildScanner.

private InternalScanner buildScanner(String keyPrefix, String value, HRegion r) throws IOException {
    // Defaults to FilterList.Operator.MUST_PASS_ALL.
    FilterList allFilters = new FilterList();
    allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
    // Only return rows where this column value exists in the row.
    SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
    filter.setFilterIfMissing(true);
    allFilters.addFilter(filter);
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("trans-blob"));
    scan.addFamily(Bytes.toBytes("trans-type"));
    scan.addFamily(Bytes.toBytes("trans-date"));
    scan.addFamily(Bytes.toBytes("trans-tags"));
    scan.addFamily(Bytes.toBytes("trans-group"));
    scan.setFilter(allFilters);
    return r.getScanner(scan);
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter), FilterList (org.apache.hadoop.hbase.filter.FilterList), Scan (org.apache.hadoop.hbase.client.Scan)
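
For context, here is a minimal client-side sketch of the same filter stack applied through the public Table API. It is an illustration, not part of the test: the Connection parameter, the "transactions" table name, and the method name are assumptions, and the imports mirror the list above plus Table, TableName, Result, ResultScanner, and CompareOp.

private void scanWithPrefixAndValue(Connection conn, String keyPrefix, String value) throws IOException {
    // MUST_PASS_ALL: a row is returned only if every filter in the list accepts it.
    FilterList allFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
    SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
    // Without this, rows lacking trans-tags:qual2 entirely would also be returned.
    filter.setFilterIfMissing(true);
    allFilters.addFilter(filter);
    Scan scan = new Scan();
    scan.setFilter(allFilters);
    try (Table table = conn.getTable(TableName.valueOf("transactions"));
            ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
            System.out.println(Bytes.toString(result.getRow()));
        }
    }
}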

Example 7 with SingleColumnValueFilter

use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by apache.

the class TestHRegion method testFlushCacheWhileScanning.

/**
   * Flushes the cache in a thread while scanning. The tests verify that the
   * scan is coherent - e.g. the returned results are always from the same or a
   * later update than the previous results.
   *
   * @throws IOException
   *           scan / compact
   * @throws InterruptedException
   *           thread join
   */
@Test
public void testFlushCacheWhileScanning() throws IOException, InterruptedException {
    byte[] family = Bytes.toBytes("family");
    int numRows = 1000;
    int flushAndScanInterval = 10;
    int compactInterval = 10 * flushAndScanInterval;
    this.region = initHRegion(tableName, method, CONF, family);
    FlushThread flushThread = new FlushThread();
    try {
        flushThread.start();
        Scan scan = new Scan();
        scan.addFamily(family);
        scan.setFilter(new SingleColumnValueFilter(family, qual1, CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(5L))));
        int expectedCount = 0;
        List<Cell> res = new ArrayList<>();
        boolean toggle = true;
        for (long i = 0; i < numRows; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.setDurability(Durability.SKIP_WAL);
            put.addColumn(family, qual1, Bytes.toBytes(i % 10));
            region.put(put);
            if (i != 0 && i % compactInterval == 0) {
                LOG.debug("iteration = " + i + " ts=" + System.currentTimeMillis());
                region.compact(true);
            }
            if (i % 10 == 5L) {
                expectedCount++;
            }
            if (i != 0 && i % flushAndScanInterval == 0) {
                res.clear();
                InternalScanner scanner = region.getScanner(scan);
                if (toggle) {
                    flushThread.flush();
                }
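                // Drain the scanner; next(res) returns false once no rows remain.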
                while (scanner.next(res)) ;
                if (!toggle) {
                    flushThread.flush();
                }
                assertEquals("toggle=" + toggle + "i=" + i + " ts=" + System.currentTimeMillis(), expectedCount, res.size());
                toggle = !toggle;
            }
        }
    } finally {
        try {
            flushThread.done();
            flushThread.join();
            flushThread.checkNoError();
        } catch (InterruptedException ie) {
            LOG.warn("Caught exception when joining with flushThread", ie);
        }
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), ArrayList (java.util.ArrayList), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), Put (org.apache.hadoop.hbase.client.Put), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
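
This test exercises the comparator form of the constructor. BinaryComparator compares raw bytes, so Bytes.toBytes(5L), an 8-byte big-endian encoding, matches only cells that were also written with Bytes.toBytes(long). A hedged client-side sketch of the same predicate (the Connection parameter, the "t" table name, and the method name are assumptions):

private long countRowsWhereQual1IsFive(Connection conn) throws IOException {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("family"));
    // By default rows missing family:qual1 also pass; the test above can skip
    // setFilterIfMissing(true) only because every row it writes has the column.
    scan.setFilter(new SingleColumnValueFilter(Bytes.toBytes("family"), Bytes.toBytes("qual1"), CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(5L))));
    long count = 0;
    try (Table table = conn.getTable(TableName.valueOf("t"));
            ResultScanner scanner = table.getScanner(scan)) {
        for (Result ignored : scanner) {
            count++;
        }
    }
    return count;
}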

Example 8 with SingleColumnValueFilter

use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by apache.

the class TestHRegion method testIndexesScanWithOneDeletedRow.

@Test
public void testIndexesScanWithOneDeletedRow() throws IOException {
    byte[] family = Bytes.toBytes("family");
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, family);
    try {
        Put put = new Put(Bytes.toBytes(1L));
        put.addColumn(family, qual1, 1L, Bytes.toBytes(1L));
        region.put(put);
        region.flush(true);
        Delete delete = new Delete(Bytes.toBytes(1L), 1L);
        region.delete(delete);
        put = new Put(Bytes.toBytes(2L));
        put.addColumn(family, qual1, 2L, Bytes.toBytes(2L));
        region.put(put);
        Scan idxScan = new Scan();
        idxScan.addFamily(family);
        idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.<Filter>asList(new SingleColumnValueFilter(family, qual1, CompareOp.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes(0L))), new SingleColumnValueFilter(family, qual1, CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(3L))))));
        InternalScanner scanner = region.getScanner(idxScan);
        List<Cell> res = new ArrayList<>();
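        // Drain the scanner; matching cells accumulate in res.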
        while (scanner.next(res)) ;
        assertEquals(1L, res.size());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter), ColumnCountGetFilter (org.apache.hadoop.hbase.filter.ColumnCountGetFilter), SingleColumnValueExcludeFilter (org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter), Filter (org.apache.hadoop.hbase.filter.Filter), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), FilterList (org.apache.hadoop.hbase.filter.FilterList), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), Test (org.junit.Test)
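
The FilterList here expresses an inclusive range predicate over a long-encoded column. A small sketch of the same idiom factored into a reusable helper (the helper itself is illustrative, not from the test). One caveat: BinaryComparator compares bytes unsigned, which agrees with the numeric order of Bytes.toBytes(long) only when the bounds and stored values share a sign, because negative longs sort above positive ones in raw-byte order.

private static Filter longRangeFilter(byte[] family, byte[] qualifier, long lo, long hi) {
    // Inclusive [lo, hi] range: both conditions must pass for a row to be returned.
    return new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.<Filter>asList(
        new SingleColumnValueFilter(family, qualifier, CompareOp.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes(lo))),
        new SingleColumnValueFilter(family, qualifier, CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(hi)))));
}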

Example 9 with SingleColumnValueFilter

use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by apache.

the class TestJoinedScanners method runScanner.

private void runScanner(Table table, boolean slow) throws Exception {
    long time = System.nanoTime();
    Scan scan = new Scan();
    scan.addColumn(cf_essential, col_name);
    scan.addColumn(cf_joined, col_name);
    SingleColumnValueFilter filter = new SingleColumnValueFilter(cf_essential, col_name, CompareFilter.CompareOp.EQUAL, flag_yes);
    filter.setFilterIfMissing(true);
    scan.setFilter(filter);
    scan.setLoadColumnFamiliesOnDemand(!slow);
    ResultScanner result_scanner = table.getScanner(scan);
    Result res;
    long rows_count = 0;
    while ((res = result_scanner.next()) != null) {
        rows_count++;
    }
    double timeSec = (System.nanoTime() - time) / 1000000000.0;
    result_scanner.close();
    LOG.info((slow ? "Slow" : "Joined") + " scanner finished in " + Double.toString(timeSec) + " seconds, got " + Long.toString(rows_count / 2) + " rows");
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Scan (org.apache.hadoop.hbase.client.Scan), Result (org.apache.hadoop.hbase.client.Result)
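
The interesting switch here is setLoadColumnFamiliesOnDemand. SingleColumnValueFilter reports only its own column family as essential, so with on-demand loading enabled the region evaluates the filter against cf_essential first and reads the larger joined family only for rows the filter accepts. A hedged sketch of the idiom in isolation (the "essential"/"joined" family names, qualifiers, and flag value are assumptions):

Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("essential"), Bytes.toBytes("flag"));
scan.addColumn(Bytes.toBytes("joined"), Bytes.toBytes("payload"));
SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("essential"), Bytes.toBytes("flag"), CompareFilter.CompareOp.EQUAL, Bytes.toBytes("Y"));
filter.setFilterIfMissing(true);
scan.setFilter(filter);
// true enables the joined-scanner path; the test's "slow" mode passes false.
scan.setLoadColumnFamiliesOnDemand(true);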

Example 10 with SingleColumnValueFilter

use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by apache.

the class TestSCVFWithMiniCluster method setUp.

@BeforeClass
public static void setUp() throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1);
    Admin admin = util.getAdmin();
    destroy(admin, HBASE_TABLE_NAME);
    create(admin, HBASE_TABLE_NAME, FAMILY_A, FAMILY_B);
    admin.close();
    htable = util.getConnection().getTable(HBASE_TABLE_NAME);
    /* Add some values */
    List<Put> puts = new ArrayList<>();
    /* Add a row with 'a:foo' = false */
    Put put = new Put(Bytes.toBytes("1"));
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("false"));
    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    puts.add(put);
    /* Add a row with 'a:foo' = true */
    put = new Put(Bytes.toBytes("2"));
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("true"));
    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    puts.add(put);
    /* Add a row with 'a:foo' qualifier not set */
    put = new Put(Bytes.toBytes("3"));
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    puts.add(put);
    htable.put(puts);
    /*
     * We want to filter out from the scan all rows that do not have the column 'a:foo' with value
     * 'false'. Only row with key '1' should be returned in the scan.
     */
    scanFilter = new SingleColumnValueFilter(FAMILY_A, QUALIFIER_FOO, CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("false")));
    ((SingleColumnValueFilter) scanFilter).setFilterIfMissing(true);
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), ArrayList (java.util.ArrayList), Admin (org.apache.hadoop.hbase.client.Admin), Put (org.apache.hadoop.hbase.client.Put), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), BeforeClass (org.junit.BeforeClass)
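
With this fixture, only row '1' carries a:foo = "false", and setFilterIfMissing(true) keeps row '3', which lacks a:foo entirely, out of the results. A hedged sketch of the check a test method could run against the fixture's static htable and scanFilter fields (the method name is an assumption):

private static List<String> scanMatchingRowKeys() throws IOException {
    Scan scan = new Scan();
    scan.setFilter(scanFilter);
    List<String> keys = new ArrayList<>();
    try (ResultScanner scanner = htable.getScanner(scan)) {
        for (Result result : scanner) {
            keys.add(Bytes.toString(result.getRow()));
        }
    }
    // Expected: exactly one key, "1".
    return keys;
}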

Aggregations

SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 30 uses
Scan (org.apache.hadoop.hbase.client.Scan): 12 uses
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 9 uses
Filter (org.apache.hadoop.hbase.filter.Filter): 9 uses
FilterList (org.apache.hadoop.hbase.filter.FilterList): 8 uses
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 7 uses
BitComparator (org.apache.hadoop.hbase.filter.BitComparator): 7 uses
Result (org.apache.hadoop.hbase.client.Result): 6 uses
RowFilter (org.apache.hadoop.hbase.filter.RowFilter): 6 uses
Test (org.junit.Test): 5 uses
ArrayList (java.util.ArrayList): 4 uses
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 4 uses
Put (org.apache.hadoop.hbase.client.Put): 3 uses
CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp): 3 uses
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter): 3 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 3 uses
SchemaPath (org.apache.drill.common.expression.SchemaPath): 2 uses
Cell (org.apache.hadoop.hbase.Cell): 2 uses
TableName (org.apache.hadoop.hbase.TableName): 2 uses
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 2 uses