
Example 1 with SingleColumnValueExcludeFilter

Use of org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter in project hbase by apache.

From the class TestServerSideScanMetricsFromClientSide, method testRowsFilteredMetric:

public void testRowsFilteredMetric(Scan baseScan) throws Exception {
    // No filter installed. No rows should be filtered.
    testRowsFilteredMetric(baseScan, null, 0);
    // Row filter doesn't match any row key. All rows should be filtered
    Filter filter = new RowFilter(CompareOp.EQUAL, new BinaryComparator("xyz".getBytes()));
    testRowsFilteredMetric(baseScan, filter, ROWS.length);
    // Filter will return results containing only the first key. Number of entire rows filtered
    // should be 0.
    filter = new FirstKeyOnlyFilter();
    testRowsFilteredMetric(baseScan, filter, 0);
    // Column prefix will find some matching qualifier on each row. Number of entire rows filtered
    // should be 0
    filter = new ColumnPrefixFilter(QUALIFIERS[0]);
    testRowsFilteredMetric(baseScan, filter, 0);
    // Column prefix will NOT find any matching qualifier on any row. All rows should be filtered
    filter = new ColumnPrefixFilter("xyz".getBytes());
    testRowsFilteredMetric(baseScan, filter, ROWS.length);
    // Matching column value should exist in each row. No rows should be filtered.
    filter = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOp.EQUAL, VALUE);
    testRowsFilteredMetric(baseScan, filter, 0);
    // No matching column value should exist in any row. Filter all rows
    filter = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOp.NOT_EQUAL, VALUE);
    testRowsFilteredMetric(baseScan, filter, ROWS.length);
    List<Filter> filters = new ArrayList<>();
    filters.add(new RowFilter(CompareOp.EQUAL, new BinaryComparator(ROWS[0])));
    filters.add(new RowFilter(CompareOp.EQUAL, new BinaryComparator(ROWS[3])));
    int numberOfMatchingRowFilters = filters.size();
    filter = new FilterList(Operator.MUST_PASS_ONE, filters);
    testRowsFilteredMetric(baseScan, filter, ROWS.length - numberOfMatchingRowFilters);
    filters.clear();
    // Add a SingleColumnValueExcludeFilter for every (family, qualifier) pair. Each filter
    // matches its column (every cell holds VALUE) but then excludes that column from the
    // results, so together they strip every cell from every row, leaving an empty cell
    // array in RegionScanner#nextInternal which should be interpreted as a row being filtered.
    for (int family = 0; family < FAMILIES.length; family++) {
        for (int qualifier = 0; qualifier < QUALIFIERS.length; qualifier++) {
            filters.add(new SingleColumnValueExcludeFilter(FAMILIES[family], QUALIFIERS[qualifier], CompareOp.EQUAL, VALUE));
        }
    }
    filter = new FilterList(Operator.MUST_PASS_ONE, filters);
    testRowsFilteredMetric(baseScan, filter, ROWS.length);
}
Also used : ColumnPrefixFilter(org.apache.hadoop.hbase.filter.ColumnPrefixFilter) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) SingleColumnValueExcludeFilter(org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) Filter(org.apache.hadoop.hbase.filter.Filter) ArrayList(java.util.ArrayList) FilterList(org.apache.hadoop.hbase.filter.FilterList) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator)
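For orientation, SingleColumnValueExcludeFilter behaves like SingleColumnValueFilter except that the column used for the comparison is dropped from the returned results. Below is a minimal client-side sketch of the filter together with the rows-filtered metric this test verifies. The table name my_table, family cf, qualifier q, and value "expected" are hypothetical, and the sketch assumes the Scan#setScanMetricsEnabled / Scan#getScanMetrics accessors of this era of the client API.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class RowsFilteredMetricSketch {
    public static void main(String[] args) throws IOException {
        byte[] family = Bytes.toBytes("cf");      // hypothetical column family
        byte[] qualifier = Bytes.toBytes("q");    // hypothetical qualifier
        byte[] value = Bytes.toBytes("expected"); // hypothetical value to match

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("my_table"))) { // hypothetical table
            Scan scan = new Scan();
            // Ask the region servers to ship scan metrics back with the results.
            scan.setScanMetricsEnabled(true);
            // Include rows whose cf:q equals "expected", but strip the tested
            // cell itself from each returned Result.
            scan.setFilter(
                new SingleColumnValueExcludeFilter(family, qualifier, CompareOp.EQUAL, value));
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result r : scanner) {
                    // cf:q is excluded from r even though it drove the match.
                    System.out.println(Bytes.toString(r.getRow()));
                }
            }
            // The counter is complete once the scan has been fully iterated.
            ScanMetrics metrics = scan.getScanMetrics();
            System.out.println("rows filtered: " + metrics.countOfRowsFiltered.get());
        }
    }
}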

Example 2 with SingleColumnValueExcludeFilter

Use of org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter in project hbase by apache.

From the class TestHRegion, method testScanner_JoinedScanners:

/**
   * Added for HBASE-5416
   *
   * Here we test scan optimization when only subset of CFs are used in filter
   * conditions.
   */
@Test
public void testScanner_JoinedScanners() throws IOException {
    byte[] cf_essential = Bytes.toBytes("essential");
    byte[] cf_joined = Bytes.toBytes("joined");
    byte[] cf_alpha = Bytes.toBytes("alpha");
    this.region = initHRegion(tableName, method, CONF, cf_essential, cf_joined, cf_alpha);
    try {
        byte[] row1 = Bytes.toBytes("row1");
        byte[] row2 = Bytes.toBytes("row2");
        byte[] row3 = Bytes.toBytes("row3");
        byte[] col_normal = Bytes.toBytes("d");
        byte[] col_alpha = Bytes.toBytes("a");
        byte[] filtered_val = Bytes.toBytes(3);
        Put put = new Put(row1);
        put.addColumn(cf_essential, col_normal, Bytes.toBytes(1));
        put.addColumn(cf_joined, col_alpha, Bytes.toBytes(1));
        region.put(put);
        put = new Put(row2);
        put.addColumn(cf_essential, col_alpha, Bytes.toBytes(2));
        put.addColumn(cf_joined, col_normal, Bytes.toBytes(2));
        put.addColumn(cf_alpha, col_alpha, Bytes.toBytes(2));
        region.put(put);
        put = new Put(row3);
        put.addColumn(cf_essential, col_normal, filtered_val);
        put.addColumn(cf_joined, col_normal, filtered_val);
        region.put(put);
        // Check two things:
        // 1. result list contains expected values
        // 2. result list is sorted properly
        Scan scan = new Scan();
        Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal, CompareOp.NOT_EQUAL, filtered_val);
        scan.setFilter(filter);
        scan.setLoadColumnFamiliesOnDemand(true);
        InternalScanner s = region.getScanner(scan);
        List<Cell> results = new ArrayList<>();
        // row1 passes the filter; the tested cf_essential:d cell is excluded from
        // the results, leaving only the cf_joined cell.
        assertTrue(s.next(results));
        assertEquals(1, results.size());
        results.clear();
        // row2 has no cf_essential:d cell, so nothing is excluded and all three
        // cells come back, sorted by family.
        assertTrue(s.next(results));
        assertEquals(3, results.size());
        assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha));
        assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential));
        assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined));
        results.clear();
        // row3 is filtered out entirely (cf_essential:d equals filtered_val).
        assertFalse(s.next(results));
        assertEquals(0, results.size());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used : SingleColumnValueExcludeFilter(org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter) SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) PrefixFilter(org.apache.hadoop.hbase.filter.PrefixFilter) ColumnCountGetFilter(org.apache.hadoop.hbase.filter.ColumnCountGetFilter) Filter(org.apache.hadoop.hbase.filter.Filter) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
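The point of HBASE-5416 is the joined-scanner optimization toggled by Scan#setLoadColumnFamiliesOnDemand: families referenced by the filter are treated as essential and scanned first, and the remaining families are fetched only for rows that pass the filter. A minimal sketch of the client-side usage, with hypothetical table events, essential family meta, lazily loaded family payload, and qualifiers type and body:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class JoinedScannersSketch {
    public static void main(String[] args) throws IOException {
        byte[] essential = Bytes.toBytes("meta");  // hypothetical small, filtered-on family
        byte[] joined = Bytes.toBytes("payload");  // hypothetical large family, loaded lazily

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("events"))) { // hypothetical table
            // The filter only references "meta", making it the sole essential
            // family for this scan.
            SingleColumnValueFilter filter = new SingleColumnValueFilter(
                essential, Bytes.toBytes("type"), CompareOp.EQUAL, Bytes.toBytes("click"));
            filter.setFilterIfMissing(true); // skip rows without meta:type
            Scan scan = new Scan();
            scan.setFilter(filter);
            // Opt in to the joined-scanner optimization: "payload" cells are
            // read only for rows that already passed the filter on "meta".
            scan.setLoadColumnFamiliesOnDemand(true);
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result r : scanner) {
                    byte[] body = r.getValue(joined, Bytes.toBytes("body")); // hypothetical qualifier
                    if (body != null) {
                        System.out.println(Bytes.toString(body));
                    }
                }
            }
        }
    }
}

On wide rows this avoids reading the payload family for rows the filter rejects, which is exactly the subset-of-CFs case the javadoc above describes.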

Aggregations

ArrayList (java.util.ArrayList): 2
Filter (org.apache.hadoop.hbase.filter.Filter): 2
SingleColumnValueExcludeFilter (org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter): 2
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 2
Cell (org.apache.hadoop.hbase.Cell): 1
Put (org.apache.hadoop.hbase.client.Put): 1
Scan (org.apache.hadoop.hbase.client.Scan): 1
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 1
ColumnCountGetFilter (org.apache.hadoop.hbase.filter.ColumnCountGetFilter): 1
ColumnPrefixFilter (org.apache.hadoop.hbase.filter.ColumnPrefixFilter): 1
FilterList (org.apache.hadoop.hbase.filter.FilterList): 1
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 1
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter): 1
RowFilter (org.apache.hadoop.hbase.filter.RowFilter): 1
Test (org.junit.Test): 1