
Example 21 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hive by apache.

From class HBaseReadWrite, method scanDatabases.

/**
   * Get a list of databases.
   * @param regex Regular expression to use in searching for database names.  It is expected to
   *              be a Java regular expression.  If it is null then all databases will be returned.
   * @return list of databases matching the regular expression.
   * @throws IOException
   */
List<Database> scanDatabases(String regex) throws IOException {
    Filter filter = null;
    if (regex != null) {
        filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
    }
    Iterator<Result> iter = scan(DB_TABLE, CATALOG_CF, CATALOG_COL, filter);
    List<Database> databases = new ArrayList<>();
    while (iter.hasNext()) {
        Result result = iter.next();
        databases.add(HBaseUtils.deserializeDatabase(result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL)));
    }
    return databases;
}
Also used: RegexStringComparator (org.apache.hadoop.hbase.filter.RegexStringComparator), RowFilter (org.apache.hadoop.hbase.filter.RowFilter), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), Filter (org.apache.hadoop.hbase.filter.Filter), CompareFilter (org.apache.hadoop.hbase.filter.CompareFilter), BloomFilter (org.apache.hive.common.util.BloomFilter), ArrayList (java.util.ArrayList), Database (org.apache.hadoop.hive.metastore.api.Database), Result (org.apache.hadoop.hbase.client.Result)
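
For reference, a minimal sketch of the same pattern against a plain HBase Table. The table name "my_table" and the printed output are illustrative placeholders, not part of the Hive metastore schema:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

static void printMatchingRowKeys(Connection conn, String regex) throws IOException {
    Scan scan = new Scan();
    if (regex != null) {
        // EQUAL plus RegexStringComparator keeps only rows whose key matches the
        // regex; a null regex leaves the scan unfiltered, as in scanDatabases().
        Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
        scan.setFilter(filter);
    }
    // "my_table" is a placeholder table name.
    try (Table table = conn.getTable(TableName.valueOf("my_table"));
         ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
            System.out.println(Bytes.toString(result.getRow()));
        }
    }
}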

Example 22 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache.

From class TestTimestampsFilter, method getNVersions.

/**
   * Uses the TimestampsFilter on a Get to request a specified list of
   * versions for the row/column specified by rowIdx and colIdx.
   */
private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, List<Long> versions) throws IOException {
    byte[] row = Bytes.toBytes("row:" + rowIdx);
    byte[] column = Bytes.toBytes("column:" + colIdx);
    Filter filter = new TimestampsFilter(versions);
    Get get = new Get(row);
    get.addColumn(cf, column);
    get.setFilter(filter);
    get.setMaxVersions();
    Result result = ht.get(get);
    return result.rawCells();
}
Also used: TimestampsFilter (org.apache.hadoop.hbase.filter.TimestampsFilter), Filter (org.apache.hadoop.hbase.filter.Filter)
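
A hedged standalone sketch of the same idea: fetching only the cells of one row whose timestamps appear in a given list. The row key, column names, and timestamps are illustrative placeholders:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.TimestampsFilter;
import org.apache.hadoop.hbase.util.Bytes;

static Cell[] getCellsAtTimestamps(Table table) throws IOException {
    Get get = new Get(Bytes.toBytes("row:1"));
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("column:1"));
    // Only cells whose timestamps appear in this list pass the filter.
    get.setFilter(new TimestampsFilter(Arrays.asList(100L, 200L)));
    // Without this, at most one (the newest) version per column is returned,
    // even if several timestamps match.
    get.setMaxVersions();
    Result result = table.get(get);
    return result.rawCells();
}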

Example 23 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache.

From class TestHRegion, method testScanner_JoinedScannersWithLimits.

/**
   * HBASE-5416
   *
   * Test case for when the scan limits the number of KVs returned on each next() call.
   */
@Test
public void testScanner_JoinedScannersWithLimits() throws IOException {
    final byte[] cf_first = Bytes.toBytes("first");
    final byte[] cf_second = Bytes.toBytes("second");
    this.region = initHRegion(tableName, method, CONF, cf_first, cf_second);
    try {
        final byte[] col_a = Bytes.toBytes("a");
        final byte[] col_b = Bytes.toBytes("b");
        Put put;
        for (int i = 0; i < 10; i++) {
            put = new Put(Bytes.toBytes("r" + Integer.toString(i)));
            put.addColumn(cf_first, col_a, Bytes.toBytes(i));
            if (i < 5) {
                put.addColumn(cf_first, col_b, Bytes.toBytes(i));
                put.addColumn(cf_second, col_a, Bytes.toBytes(i));
                put.addColumn(cf_second, col_b, Bytes.toBytes(i));
            }
            region.put(put);
        }
        Scan scan = new Scan();
        scan.setLoadColumnFamiliesOnDemand(true);
        Filter bogusFilter = new FilterBase() {

            @Override
            public ReturnCode filterKeyValue(Cell ignored) throws IOException {
                return ReturnCode.INCLUDE;
            }

            @Override
            public boolean isFamilyEssential(byte[] name) {
                return Bytes.equals(name, cf_first);
            }
        };
        scan.setFilter(bogusFilter);
        InternalScanner s = region.getScanner(scan);
        // Our data looks like this:
        // r0: first:a, first:b, second:a, second:b
        // r1: first:a, first:b, second:a, second:b
        // r2: first:a, first:b, second:a, second:b
        // r3: first:a, first:b, second:a, second:b
        // r4: first:a, first:b, second:a, second:b
        // r5: first:a
        // r6: first:a
        // r7: first:a
        // r8: first:a
        // r9: first:a
        // But because the batch limit on next() is set to 3, we should get this:
        // r0: first:a, first:b, second:a
        // r0: second:b
        // r1: first:a, first:b, second:a
        // r1: second:b
        // r2: first:a, first:b, second:a
        // r2: second:b
        // r3: first:a, first:b, second:a
        // r3: second:b
        // r4: first:a, first:b, second:a
        // r4: second:b
        // r5: first:a
        // r6: first:a
        // r7: first:a
        // r8: first:a
        // r9: first:a
        List<Cell> results = new ArrayList<>();
        int index = 0;
        ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(3).build();
        while (true) {
            boolean more = s.next(results, scannerContext);
            if ((index >> 1) < 5) {
                if (index % 2 == 0) {
                    assertEquals(3, results.size());
                } else {
                    assertEquals(1, results.size());
                }
            } else {
                assertEquals(1, results.size());
            }
            results.clear();
            index++;
            if (!more) {
                break;
            }
        }
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used: PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter), ColumnCountGetFilter (org.apache.hadoop.hbase.filter.ColumnCountGetFilter), SingleColumnValueExcludeFilter (org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter), Filter (org.apache.hadoop.hbase.filter.Filter), SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), FilterBase (org.apache.hadoop.hbase.filter.FilterBase), Test (org.junit.Test)
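
The test drives the region scanner directly through a ScannerContext. From the client API, a roughly equivalent setup (a sketch, assuming the same "first"/"second" family layout) uses Scan.setBatch(), which likewise splits wide rows across next() calls:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.util.Bytes;

static Scan buildJoinedBatchedScan() {
    Scan scan = new Scan();
    scan.setLoadColumnFamiliesOnDemand(true);
    // Cap the number of cells per Result, comparable to the test's
    // ScannerContext.newBuilder().setBatchLimit(3).
    scan.setBatch(3);
    scan.setFilter(new FilterBase() {

        @Override
        public ReturnCode filterKeyValue(Cell ignored) {
            return ReturnCode.INCLUDE;
        }

        @Override
        public boolean isFamilyEssential(byte[] name) {
            // Only "first" is scanned eagerly; "second" is joined in lazily.
            return Bytes.equals(name, Bytes.toBytes("first"));
        }
    });
    return scan;
}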

Example 24 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache.

From class TestHRegion, method testIndexesScanWithOneDeletedRow.

@Test
public void testIndexesScanWithOneDeletedRow() throws IOException {
    byte[] family = Bytes.toBytes("family");
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, family);
    try {
        Put put = new Put(Bytes.toBytes(1L));
        put.addColumn(family, qual1, 1L, Bytes.toBytes(1L));
        region.put(put);
        region.flush(true);
        Delete delete = new Delete(Bytes.toBytes(1L), 1L);
        region.delete(delete);
        put = new Put(Bytes.toBytes(2L));
        put.addColumn(family, qual1, 2L, Bytes.toBytes(2L));
        region.put(put);
        Scan idxScan = new Scan();
        idxScan.addFamily(family);
        idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
            Arrays.<Filter>asList(
                new SingleColumnValueFilter(family, qual1, CompareOp.GREATER_OR_EQUAL,
                    new BinaryComparator(Bytes.toBytes(0L))),
                new SingleColumnValueFilter(family, qual1, CompareOp.LESS_OR_EQUAL,
                    new BinaryComparator(Bytes.toBytes(3L))))));
        InternalScanner scanner = region.getScanner(idxScan);
        List<Cell> res = new ArrayList<>();
        // Drain the scanner; next(res) returns false once all rows are read,
        // while the matching cells accumulate in res across calls.
        while (scanner.next(res)) {
        }
        assertEquals(1L, res.size());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter), ColumnCountGetFilter (org.apache.hadoop.hbase.filter.ColumnCountGetFilter), SingleColumnValueExcludeFilter (org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter), Filter (org.apache.hadoop.hbase.filter.Filter), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), FilterList (org.apache.hadoop.hbase.filter.FilterList), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), Test (org.junit.Test)
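
For readability, a hedged sketch of the same "0 <= value <= 3" range predicate built step by step; family and qualifier are parameters here rather than the test's fixtures:

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

static Scan buildRangeScan(byte[] family, byte[] qualifier) {
    // Lower bound: column value >= 0.
    SingleColumnValueFilter lower = new SingleColumnValueFilter(
            family, qualifier, CompareOp.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes(0L)));
    // Upper bound: column value <= 3.
    SingleColumnValueFilter upper = new SingleColumnValueFilter(
            family, qualifier, CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(3L)));
    // MUST_PASS_ALL makes the list a logical AND of both bounds.
    FilterList range = new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.<Filter>asList(lower, upper));
    Scan scan = new Scan();
    scan.addFamily(family);
    scan.setFilter(range);
    return scan;
}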

Example 25 with Filter

Use of org.apache.hadoop.hbase.filter.Filter in project hbase by apache.

From class TestScanner, method testFilters.

@Test
public void testFilters() throws IOException {
    try {
        this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
        HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
        byte[] prefix = Bytes.toBytes("ab");
        Filter newFilter = new PrefixFilter(prefix);
        Scan scan = new Scan();
        scan.setFilter(newFilter);
        rowPrefixFilter(scan);
        byte[] stopRow = Bytes.toBytes("bbc");
        newFilter = new WhileMatchFilter(new InclusiveStopFilter(stopRow));
        scan = new Scan();
        scan.setFilter(newFilter);
        rowInclusiveStopFilter(scan, stopRow);
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
    }
}
Also used: PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter), InclusiveStopFilter (org.apache.hadoop.hbase.filter.InclusiveStopFilter), WhileMatchFilter (org.apache.hadoop.hbase.filter.WhileMatchFilter), Filter (org.apache.hadoop.hbase.filter.Filter), Scan (org.apache.hadoop.hbase.client.Scan), Test (org.junit.Test)
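
A hedged sketch of the two filter setups exercised above, as standalone client-side Scans (the prefix "ab" and stop row "bbc" are the test's own values):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

static Scan prefixScan() {
    Scan scan = new Scan();
    // Pass only rows whose key starts with "ab".
    scan.setFilter(new PrefixFilter(Bytes.toBytes("ab")));
    return scan;
}

static Scan inclusiveStopScan() {
    Scan scan = new Scan();
    // InclusiveStopFilter keeps rows up to and including "bbc"; wrapping it in
    // WhileMatchFilter terminates the whole scan once a row fails, instead of
    // filtering row by row to the end of the table.
    scan.setFilter(new WhileMatchFilter(new InclusiveStopFilter(Bytes.toBytes("bbc"))));
    return scan;
}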

Aggregations

Filter (org.apache.hadoop.hbase.filter.Filter): 172
Test (org.junit.Test): 96
Scan (org.apache.hadoop.hbase.client.Scan): 94
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest): 77
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter): 76
RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter): 74
SingleKeyValueComparisonFilter (org.apache.phoenix.filter.SingleKeyValueComparisonFilter): 45
TestUtil.rowKeyFilter (org.apache.phoenix.util.TestUtil.rowKeyFilter): 45
RowFilter (org.apache.hadoop.hbase.filter.RowFilter): 40
FilterList (org.apache.hadoop.hbase.filter.FilterList): 39
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 37
TestUtil.multiEncodedKVFilter (org.apache.phoenix.util.TestUtil.multiEncodedKVFilter): 33
TestUtil.singleKVFilter (org.apache.phoenix.util.TestUtil.singleKVFilter): 33
PhoenixPreparedStatement (org.apache.phoenix.jdbc.PhoenixPreparedStatement): 31
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 27
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter): 24
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 23
CompareFilter (org.apache.hadoop.hbase.filter.CompareFilter): 22
ArrayList (java.util.ArrayList): 19
RegexStringComparator (org.apache.hadoop.hbase.filter.RegexStringComparator): 18