
Example 96 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache, from the class TestReversibleScanners, method testReversibleRegionScanner.

@Test
public void testReversibleRegionScanner() throws IOException {
    byte[] FAMILYNAME2 = Bytes.toBytes("testCf2");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()))
        .addFamily(new HColumnDescriptor(FAMILYNAME))
        .addFamily(new HColumnDescriptor(FAMILYNAME2));
    Region region = TEST_UTIL.createLocalHRegion(htd, null, null);
    loadDataToRegion(region, FAMILYNAME2);
    // verify row count with forward scan
    Scan scan = new Scan();
    InternalScanner scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, true);
    // Case 1: Full reversed scan
    scan.setReversed(true);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, false);
    // Case 2: Full reversed scan with one family
    scan = new Scan();
    scan.setReversed(true);
    scan.addFamily(FAMILYNAME);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE, ROWSIZE, false);
    // Case 3: Specify qualifiers + one family
    byte[][] specifiedQualifiers = { QUALS[1], QUALS[2] };
    for (byte[] specifiedQualifier : specifiedQualifiers) {
        scan.addColumn(FAMILYNAME, specifiedQualifier);
    }
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2, ROWSIZE, false);
    // Case 4: Specify qualifiers + two families
    for (byte[] specifiedQualifier : specifiedQualifiers) {
        scan.addColumn(FAMILYNAME2, specifiedQualifier);
    }
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2 * 2, ROWSIZE, false);
    // Case 5: Case 4 + specify start row
    int startRowNum = ROWSIZE * 3 / 4;
    scan.setStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum + 1) * 2 * 2, (startRowNum + 1), false);
    // Case 6: Case 4 + specify stop row
    int stopRowNum = ROWSIZE / 4;
    scan.setStartRow(HConstants.EMPTY_BYTE_ARRAY);
    scan.setStopRow(ROWS[stopRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (ROWSIZE - stopRowNum - 1) * 2 * 2, (ROWSIZE - stopRowNum - 1), false);
    // Case 7: Case 4 + specify start row + specify stop row
    scan.setStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum - stopRowNum) * 2 * 2, (startRowNum - stopRowNum), false);
    // Case 8: Case 7 + SingleColumnValueFilter
    int valueNum = startRowNum % VALUESIZE;
    Filter filter = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[valueNum]);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int unfilteredRowNum = (startRowNum - stopRowNum) / VALUESIZE + (stopRowNum / VALUESIZE == valueNum ? 0 : 1);
    verifyCountAndOrder(scanner, unfilteredRowNum * 2 * 2, unfilteredRowNum, false);
    // Case 9: Case 7 + PageFilter
    int pageSize = 10;
    filter = new PageFilter(pageSize);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int expectedRowNum = pageSize;
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case 10: Case 7 + FilterList + MUST_PASS_ONE
    SingleColumnValueFilter scvFilter1 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[0]);
    SingleColumnValueFilter scvFilter2 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[1]);
    expectedRowNum = 0;
    for (int i = startRowNum; i > stopRowNum; i--) {
        if (i % VALUESIZE == 0 || i % VALUESIZE == 1) {
            expectedRowNum++;
        }
    }
    filter = new FilterList(Operator.MUST_PASS_ONE, scvFilter1, scvFilter2);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case 11: Case 7 + FilterList + MUST_PASS_ALL
    filter = new FilterList(Operator.MUST_PASS_ALL, scvFilter1, scvFilter2);
    expectedRowNum = 0;
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
}
Also used : SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) Filter(org.apache.hadoop.hbase.filter.Filter) Scan(org.apache.hadoop.hbase.client.Scan) FilterList(org.apache.hadoop.hbase.filter.FilterList) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
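For orientation, the same reversed-scan-plus-filter pattern (Case 8 above) can be driven through the client API rather than against a Region directly. A minimal sketch follows; the connection, table name, row keys, family, qualifier, and value are illustrative placeholders, not part of the test fixture.

// Hypothetical client-side sketch of a reversed scan with a
// SingleColumnValueFilter; all names below are illustrative.
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ReversedScanSketch {

    static void scanBackwards(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("testTable"))) {
            Scan scan = new Scan();
            // Rows come back in descending key order; for a reversed scan the
            // start row must therefore sort *after* the stop row.
            scan.setReversed(true);
            scan.setStartRow(Bytes.toBytes("row75"));
            scan.setStopRow(Bytes.toBytes("row25"));
            scan.setFilter(new SingleColumnValueFilter(
                    Bytes.toBytes("testCf"), Bytes.toBytes("testQual1"),
                    CompareOp.EQUAL, Bytes.toBytes("value0")));
            try (ResultScanner rs = table.getScanner(scan)) {
                for (Result r : rs) {
                    // one Result per matching row, highest key first
                }
            }
        }
    }
}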

Example 97 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache, from the class TestHRegion, method testScanner_JoinedScanners.

/**
   * Added for HBASE-5416.
   *
   * Here we test the scan optimization that applies when only a subset of
   * column families is referenced in filter conditions.
   */
@Test
public void testScanner_JoinedScanners() throws IOException {
    byte[] cf_essential = Bytes.toBytes("essential");
    byte[] cf_joined = Bytes.toBytes("joined");
    byte[] cf_alpha = Bytes.toBytes("alpha");
    this.region = initHRegion(tableName, method, CONF, cf_essential, cf_joined, cf_alpha);
    try {
        byte[] row1 = Bytes.toBytes("row1");
        byte[] row2 = Bytes.toBytes("row2");
        byte[] row3 = Bytes.toBytes("row3");
        byte[] col_normal = Bytes.toBytes("d");
        byte[] col_alpha = Bytes.toBytes("a");
        byte[] filtered_val = Bytes.toBytes(3);
        Put put = new Put(row1);
        put.addColumn(cf_essential, col_normal, Bytes.toBytes(1));
        put.addColumn(cf_joined, col_alpha, Bytes.toBytes(1));
        region.put(put);
        put = new Put(row2);
        put.addColumn(cf_essential, col_alpha, Bytes.toBytes(2));
        put.addColumn(cf_joined, col_normal, Bytes.toBytes(2));
        put.addColumn(cf_alpha, col_alpha, Bytes.toBytes(2));
        region.put(put);
        put = new Put(row3);
        put.addColumn(cf_essential, col_normal, filtered_val);
        put.addColumn(cf_joined, col_normal, filtered_val);
        region.put(put);
        // Check two things:
        // 1. result list contains expected values
        // 2. result list is sorted properly
        Scan scan = new Scan();
        Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal, CompareOp.NOT_EQUAL, filtered_val);
        scan.setFilter(filter);
        scan.setLoadColumnFamiliesOnDemand(true);
        InternalScanner s = region.getScanner(scan);
        List<Cell> results = new ArrayList<>();
        assertTrue(s.next(results));
        assertEquals(1, results.size());
        results.clear();
        assertTrue(s.next(results));
        assertEquals(3, results.size());
        assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha));
        assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential));
        assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined));
        results.clear();
        assertFalse(s.next(results));
        assertEquals(0, results.size());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used : SingleColumnValueExcludeFilter(org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter) PrefixFilter(org.apache.hadoop.hbase.filter.PrefixFilter) ColumnCountGetFilter(org.apache.hadoop.hbase.filter.ColumnCountGetFilter) Filter(org.apache.hadoop.hbase.filter.Filter) SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
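The joined-scanner optimization this test exercises can also be requested from the client side. A minimal sketch, assuming an open Connection; the table, family, and column names are illustrative placeholders.

// Hypothetical client-side sketch of lazy column-family loading (HBASE-5416);
// the filter touches only the "essential" family, so the server can defer
// reading the other families until a row actually passes.
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class JoinedScannerSketch {

    static void scanWithLazyFamilies(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("testTable"))) {
            // The filter only inspects the "essential" family...
            SingleColumnValueFilter filter = new SingleColumnValueFilter(
                    Bytes.toBytes("essential"), Bytes.toBytes("d"),
                    CompareOp.NOT_EQUAL, Bytes.toBytes(3));
            Scan scan = new Scan();
            scan.setFilter(filter);
            // ...so the region may skip loading the other families for rows
            // the filter rejects, joining them in only for passing rows.
            scan.setLoadColumnFamiliesOnDemand(true);
            try (ResultScanner rs = table.getScanner(scan)) {
                for (Result r : rs) {
                    // each passing row arrives with all of its families loaded
                }
            }
        }
    }
}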

Example 98 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache, from the class ScannerModel, method fromScan.

/**
   * @param scan the scan specification
   * @return a ScannerModel representing the scan
   * @throws Exception
   */
public static ScannerModel fromScan(Scan scan) throws Exception {
    ScannerModel model = new ScannerModel();
    model.setStartRow(scan.getStartRow());
    model.setEndRow(scan.getStopRow());
    Map<byte[], NavigableSet<byte[]>> families = scan.getFamilyMap();
    if (families != null) {
        for (Map.Entry<byte[], NavigableSet<byte[]>> entry : families.entrySet()) {
            if (entry.getValue() != null) {
                for (byte[] qualifier : entry.getValue()) {
                    model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier));
                }
            } else {
                model.addColumn(entry.getKey());
            }
        }
    }
    model.setStartTime(scan.getTimeRange().getMin());
    model.setEndTime(scan.getTimeRange().getMax());
    int caching = scan.getCaching();
    if (caching > 0) {
        model.setCaching(caching);
    }
    int batch = scan.getBatch();
    if (batch > 0) {
        model.setBatch(batch);
    }
    int maxVersions = scan.getMaxVersions();
    if (maxVersions > 0) {
        model.setMaxVersions(maxVersions);
    }
    Filter filter = scan.getFilter();
    if (filter != null) {
        model.setFilter(stringifyFilter(filter));
    }
    // Add the visibility labels if found in the attributes
    Authorizations authorizations = scan.getAuthorizations();
    if (authorizations != null) {
        List<String> labels = authorizations.getLabels();
        for (String label : labels) {
            model.addLabel(label);
        }
    }
    return model;
}
Also used : NavigableSet(java.util.NavigableSet) Authorizations(org.apache.hadoop.hbase.security.visibility.Authorizations) InclusiveStopFilter(org.apache.hadoop.hbase.filter.InclusiveStopFilter) RandomRowFilter(org.apache.hadoop.hbase.filter.RandomRowFilter) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) ColumnCountGetFilter(org.apache.hadoop.hbase.filter.ColumnCountGetFilter) SingleColumnValueExcludeFilter(org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter) WhileMatchFilter(org.apache.hadoop.hbase.filter.WhileMatchFilter) DependentColumnFilter(org.apache.hadoop.hbase.filter.DependentColumnFilter) PrefixFilter(org.apache.hadoop.hbase.filter.PrefixFilter) QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) Filter(org.apache.hadoop.hbase.filter.Filter) KeyOnlyFilter(org.apache.hadoop.hbase.filter.KeyOnlyFilter) SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) FamilyFilter(org.apache.hadoop.hbase.filter.FamilyFilter) ColumnPrefixFilter(org.apache.hadoop.hbase.filter.ColumnPrefixFilter) ColumnPaginationFilter(org.apache.hadoop.hbase.filter.ColumnPaginationFilter) MultiRowRangeFilter(org.apache.hadoop.hbase.filter.MultiRowRangeFilter) ValueFilter(org.apache.hadoop.hbase.filter.ValueFilter) SkipFilter(org.apache.hadoop.hbase.filter.SkipFilter) TimestampsFilter(org.apache.hadoop.hbase.filter.TimestampsFilter) ColumnRangeFilter(org.apache.hadoop.hbase.filter.ColumnRangeFilter) CompareFilter(org.apache.hadoop.hbase.filter.CompareFilter) MultipleColumnPrefixFilter(org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter) ByteString(com.google.protobuf.ByteString) Map(java.util.Map)
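A minimal sketch of feeding fromScan a populated Scan; the row keys, column, caching value, and PrefixFilter below are illustrative placeholders, not part of the method above.

// Hypothetical usage sketch: build a client Scan, then convert it into the
// REST gateway's ScannerModel via fromScan.
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerModelSketch {

    static ScannerModel convert() throws Exception {
        Scan scan = new Scan();
        scan.setStartRow(Bytes.toBytes("row10"));
        scan.setStopRow(Bytes.toBytes("row90"));
        scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        scan.setCaching(100);
        scan.setFilter(new PrefixFilter(Bytes.toBytes("row")));
        // fromScan copies the row range, columns, time range, caching, batch,
        // max versions, the stringified filter, and any visibility labels.
        return ScannerModel.fromScan(scan);
    }
}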

Example 99 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache, from the class TestScannersWithFilters, method testSkipFilter.

@Test
public void testSkipFilter() throws Exception {
    // Skip any row containing the qualifier "testQualifierOne-2"
    // Should only get rows from the second group, and all keys
    Filter f = new SkipFilter(new QualifierFilter(CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))));
    Scan s = new Scan();
    s.setFilter(f);
    KeyValue[] kvs = {
        // testRowTwo-0
        new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
        // testRowTwo-2
        new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
        // testRowTwo-3
        new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) };
    verifyScanFull(s, kvs);
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) InclusiveStopFilter(org.apache.hadoop.hbase.filter.InclusiveStopFilter) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) PrefixFilter(org.apache.hadoop.hbase.filter.PrefixFilter) QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) Filter(org.apache.hadoop.hbase.filter.Filter) ValueFilter(org.apache.hadoop.hbase.filter.ValueFilter) SkipFilter(org.apache.hadoop.hbase.filter.SkipFilter) Scan(org.apache.hadoop.hbase.client.Scan) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) Test(org.junit.Test)
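What makes this test interesting is SkipFilter's row-level semantics: the wrapped QualifierFilter alone would only drop individual cells, while SkipFilter discards the entire row once any of its cells fails. A minimal sketch of the contrast, reusing the qualifier from the test; the surrounding class is illustrative scaffolding.

// Hypothetical sketch contrasting a bare QualifierFilter with the SkipFilter
// wrapper used in the test above.
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.SkipFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipFilterSketch {

    static Scan bareScan() {
        // Alone, this removes only the cells whose qualifier IS
        // "testQualifierOne-2"; the rest of each row still comes back.
        Filter bare = new QualifierFilter(CompareOp.NOT_EQUAL,
                new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
        Scan scan = new Scan();
        scan.setFilter(bare);
        return scan;
    }

    static Scan skippingScan() {
        // Wrapped in SkipFilter, the whole row is dropped as soon as any one
        // of its cells fails the inner filter, which is why the group-one
        // rows disappear entirely in the test above.
        Filter skipping = new SkipFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
                new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))));
        Scan scan = new Scan();
        scan.setFilter(skipping);
        return scan;
    }
}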

Example 100 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache, from the class TestScannersWithFilters, method testValueFilter.

@Test
public void testValueFilter() throws Exception {
    // Match group one rows
    long expectedRows = numRows / 2;
    long expectedKeys = colsPerRow;
    Filter f = new ValueFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne")));
    Scan s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match group two rows
    expectedRows = numRows / 2;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("testValueTwo")));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match all values using regex
    expectedRows = numRows;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.EQUAL, new RegexStringComparator("testValue((One)|(Two))"));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match values less than
    // Expect group one rows
    expectedRows = numRows / 2;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.LESS, new BinaryComparator(Bytes.toBytes("testValueTwo")));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match values less than or equal
    // Expect all rows
    expectedRows = numRows;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes("testValueTwo")));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match values less than or equal
    // Expect group one rows
    expectedRows = numRows / 2;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne")));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match values not equal
    // Expect half the rows
    expectedRows = numRows / 2;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne")));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match values greater or equal
    // Expect all rows
    expectedRows = numRows;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne")));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match values greater
    // Expect half the rows
    expectedRows = numRows / 2;
    expectedKeys = colsPerRow;
    f = new ValueFilter(CompareOp.GREATER, new BinaryComparator(Bytes.toBytes("testValueOne")));
    s = new Scan();
    s.setFilter(f);
    verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
    // Match values not equal to testValueOne
    // Look across rows and fully validate the keys and ordering
    // Should see all keys in all group two rows
    f = new ValueFilter(CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne")));
    s = new Scan();
    s.setFilter(f);
    KeyValue[] kvs = {
        // testRowTwo-0
        new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
        // testRowTwo-2
        new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
        // testRowTwo-3
        new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
        new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) };
    verifyScanFull(s, kvs);
}
Also used : RegexStringComparator(org.apache.hadoop.hbase.filter.RegexStringComparator) KeyValue(org.apache.hadoop.hbase.KeyValue) InclusiveStopFilter(org.apache.hadoop.hbase.filter.InclusiveStopFilter) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) PrefixFilter(org.apache.hadoop.hbase.filter.PrefixFilter) QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) Filter(org.apache.hadoop.hbase.filter.Filter) ValueFilter(org.apache.hadoop.hbase.filter.ValueFilter) SkipFilter(org.apache.hadoop.hbase.filter.SkipFilter) Scan(org.apache.hadoop.hbase.client.Scan) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) Test(org.junit.Test)
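A minimal sketch isolating the regex case above: ValueFilter compares cell values, and pairing CompareOp.EQUAL with a RegexStringComparator turns equality into pattern matching. Note that it drops non-matching cells without skipping whole rows. The wrapping class is illustrative scaffolding.

// Hypothetical sketch of a regex-based ValueFilter, as in the
// "Match all values using regex" step of the test above.
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;

public class ValueFilterSketch {

    static Scan regexValueScan() {
        Scan scan = new Scan();
        // A cell passes when its value matches "testValue((One)|(Two))";
        // cells that do not match are dropped, but the row is not skipped.
        scan.setFilter(new ValueFilter(CompareOp.EQUAL,
                new RegexStringComparator("testValue((One)|(Two))")));
        return scan;
    }
}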

Aggregations

Filter (org.apache.hadoop.hbase.filter.Filter) 172
Test (org.junit.Test) 96
Scan (org.apache.hadoop.hbase.client.Scan) 94
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest) 77
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter) 76
RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter) 74
SingleKeyValueComparisonFilter (org.apache.phoenix.filter.SingleKeyValueComparisonFilter) 45
TestUtil.rowKeyFilter (org.apache.phoenix.util.TestUtil.rowKeyFilter) 45
RowFilter (org.apache.hadoop.hbase.filter.RowFilter) 40
FilterList (org.apache.hadoop.hbase.filter.FilterList) 39
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 37
TestUtil.multiEncodedKVFilter (org.apache.phoenix.util.TestUtil.multiEncodedKVFilter) 33
TestUtil.singleKVFilter (org.apache.phoenix.util.TestUtil.singleKVFilter) 33
PhoenixPreparedStatement (org.apache.phoenix.jdbc.PhoenixPreparedStatement) 31
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) 27
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter) 24
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter) 23
CompareFilter (org.apache.hadoop.hbase.filter.CompareFilter) 22
ArrayList (java.util.ArrayList) 19
RegexStringComparator (org.apache.hadoop.hbase.filter.RegexStringComparator) 18