
Example 16 with PageFilter

Use of org.apache.hadoop.hbase.filter.PageFilter in project tephra by heisedebaise.

Class HbaseQuery, method getFilter.

/**
 * Returns the filter.
 *
 * @return the filter
 */
public synchronized Filter getFilter() {
    if (pageSize > 0) {
        // Add the page filter once, then reset pageSize so that repeated
        // calls do not append duplicate PageFilter instances.
        addFilter(new PageFilter(pageSize));
        pageSize = 0L;
    }
    return filters;
}
Also used : PageFilter(org.apache.hadoop.hbase.filter.PageFilter)
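
The pageSize is consumed lazily: the PageFilter is only built when getFilter() is called, and the reset to 0 keeps repeated calls idempotent. As a minimal sketch of how such a filter might be applied to a scan (the table name and connection setup are illustrative assumptions, not part of the tephra source):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PageFilterScanSketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection();
             Table table = connection.getTable(TableName.valueOf("demo"))) { // hypothetical table
            Scan scan = new Scan();
            // Ask each region server to stop after ~25 rows; the client may
            // still receive more, since PageFilter is evaluated per server.
            scan.setFilter(new PageFilter(25L));
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    System.out.println(Bytes.toString(result.getRow()));
                }
            }
        }
    }
}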

Example 17 with PageFilter

Use of org.apache.hadoop.hbase.filter.PageFilter in project incubator-atlas by apache.

Class HBaseBasedAuditRepository, method listEvents.

/**
 * Lists events for the given entity id in decreasing order of timestamp, starting from the given startKey.
 * Returns at most n results.
 * @param entityId entity id
 * @param startKey key of the first event to be returned, used for pagination
 * @param n number of events to be returned
 * @return list of events
 * @throws AtlasException if reading the audit events from HBase fails
 */
public List<EntityAuditEvent> listEvents(String entityId, String startKey, short n) throws AtlasException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Listing events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, n);
    }
    Table table = null;
    ResultScanner scanner = null;
    try {
        table = connection.getTable(tableName);
        /*
         * Scan details:
         * In HBase, the events are stored in increasing order of timestamp, so a reverse scan returns the latest events first.
         * A PageFilter is set to limit the number of results returned.
         * The stop row is set to the entity id to avoid scanning past the current entity.
         * small is set to true to optimise RPC calls, since the scanner is created per request.
         */
        Scan scan = new Scan().setReversed(true).setFilter(new PageFilter(n)).setStopRow(Bytes.toBytes(entityId)).setCaching(n).setSmall(true);
        if (StringUtils.isEmpty(startKey)) {
            // Set start row to entity id + max long value
            byte[] entityBytes = getKey(entityId, Long.MAX_VALUE);
            scan = scan.setStartRow(entityBytes);
        } else {
            scan = scan.setStartRow(Bytes.toBytes(startKey));
        }
        scanner = table.getScanner(scan);
        Result result;
        List<EntityAuditEvent> events = new ArrayList<>();
        // PageFilter is applied per region server, so the scan can return more than n rows in total; enforce the limit on the client as well
        while ((result = scanner.next()) != null && events.size() < n) {
            EntityAuditEvent event = fromKey(result.getRow());
            // Guard against events from other entities in case the caller passes an arbitrary start key
            if (!event.getEntityId().equals(entityId)) {
                continue;
            }
            event.setUser(getResultString(result, COLUMN_USER));
            event.setAction(EntityAuditEvent.EntityAuditAction.valueOf(getResultString(result, COLUMN_ACTION)));
            event.setDetails(getResultString(result, COLUMN_DETAIL));
            if (persistEntityDefinition) {
                String colDef = getResultString(result, COLUMN_DEFINITION);
                if (colDef != null) {
                    event.setEntityDefinition(colDef);
                }
            }
            events.add(event);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Got events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, events.size());
        }
        return events;
    } catch (IOException e) {
        throw new AtlasException(e);
    } finally {
        close(scanner);
        close(table);
    }
}
Also used : EntityAuditEvent(org.apache.atlas.EntityAuditEvent) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) IOException(java.io.IOException) AtlasException(org.apache.atlas.AtlasException) Result(org.apache.hadoop.hbase.client.Result)
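
The client-side events.size() < n check matters because PageFilter is evaluated independently on each region server, so the scan as a whole can hand back more than n rows. Below is a condensed sketch of the same reverse-scan pagination pattern; the key layout (entity id followed by a timestamp) mirrors the code above, but Atlas's actual getKey() encoding is internal, so treat scanPageReversed as a hypothetical helper:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ReverseScanPagination {
    // Hypothetical helper distilling the pattern used in listEvents().
    static List<byte[]> scanPageReversed(Table table, String entityId, int n) throws IOException {
        Scan scan = new Scan()
                .setReversed(true)                    // newest rows first
                .setFilter(new PageFilter(n))         // per-region-server hint
                .setStopRow(Bytes.toBytes(entityId))  // stay within this entity
                .setCaching(n);
        // Start at entityId + max timestamp so the reverse scan begins at the
        // most recent key (assumed key layout; Atlas uses its own getKey()).
        scan.setStartRow(Bytes.add(Bytes.toBytes(entityId), Bytes.toBytes(Long.MAX_VALUE)));
        List<byte[]> rows = new ArrayList<>();
        try (ResultScanner scanner = table.getScanner(scan)) {
            Result result;
            // Client-side cap: PageFilter alone may let through more than n rows.
            while ((result = scanner.next()) != null && rows.size() < n) {
                rows.add(result.getRow());
            }
        }
        return rows;
    }
}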

Example 18 with PageFilter

Use of org.apache.hadoop.hbase.filter.PageFilter in project hbase by apache.

Class TestReversibleScanners, method testReversibleRegionScanner.

@Test
public void testReversibleRegionScanner() throws IOException {
    byte[] FAMILYNAME2 = Bytes.toBytes("testCf2");
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME2)).build();
    HRegion region = TEST_UTIL.createLocalHRegion(htd, null, null);
    loadDataToRegion(region, FAMILYNAME2);
    // verify row count with forward scan
    Scan scan = new Scan();
    InternalScanner scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, true);
    // Case 1: Full reversed scan
    scan.setReversed(true);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, false);
    // Case 2: Full reversed scan with one family
    scan = new Scan();
    scan.setReversed(true);
    scan.addFamily(FAMILYNAME);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE, ROWSIZE, false);
    // Case 3: Specified qualifiers + one family
    byte[][] specifiedQualifiers = { QUALS[1], QUALS[2] };
    for (byte[] specifiedQualifier : specifiedQualifiers) scan.addColumn(FAMILYNAME, specifiedQualifier);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2, ROWSIZE, false);
    // Case 4: Specified qualifiers + two families
    for (byte[] specifiedQualifier : specifiedQualifiers) scan.addColumn(FAMILYNAME2, specifiedQualifier);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2 * 2, ROWSIZE, false);
    // Case 5: Case 4 + specified start row
    int startRowNum = ROWSIZE * 3 / 4;
    scan.withStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum + 1) * 2 * 2, (startRowNum + 1), false);
    // Case 6: Case 4 + specified stop row
    int stopRowNum = ROWSIZE / 4;
    scan.withStartRow(HConstants.EMPTY_BYTE_ARRAY);
    scan.withStopRow(ROWS[stopRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (ROWSIZE - stopRowNum - 1) * 2 * 2, (ROWSIZE - stopRowNum - 1), false);
    // Case 7: Case 4 + specified start and stop rows
    scan.withStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum - stopRowNum) * 2 * 2, (startRowNum - stopRowNum), false);
    // Case 8: Case 7 + SingleColumnValueFilter
    int valueNum = startRowNum % VALUESIZE;
    Filter filter = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[valueNum]);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int unfilteredRowNum = (startRowNum - stopRowNum) / VALUESIZE + (stopRowNum / VALUESIZE == valueNum ? 0 : 1);
    verifyCountAndOrder(scanner, unfilteredRowNum * 2 * 2, unfilteredRowNum, false);
    // Case 9: Case 7 + PageFilter
    int pageSize = 10;
    filter = new PageFilter(pageSize);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int expectedRowNum = pageSize;
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case 10: Case 7 + FilterList with MUST_PASS_ONE
    SingleColumnValueFilter scvFilter1 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[0]);
    SingleColumnValueFilter scvFilter2 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[1]);
    expectedRowNum = 0;
    for (int i = startRowNum; i > stopRowNum; i--) {
        if (i % VALUESIZE == 0 || i % VALUESIZE == 1) {
            expectedRowNum++;
        }
    }
    filter = new FilterList(Operator.MUST_PASS_ONE, scvFilter1, scvFilter2);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case 11: Case 7 + FilterList with MUST_PASS_ALL
    filter = new FilterList(Operator.MUST_PASS_ALL, scvFilter1, scvFilter2);
    expectedRowNum = 0;
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
}
Also used : SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) Filter(org.apache.hadoop.hbase.filter.Filter) SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) Scan(org.apache.hadoop.hbase.client.Scan) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) FilterList(org.apache.hadoop.hbase.filter.FilterList) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
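
The last two cases show how FilterList semantics change the expected count: MUST_PASS_ONE unions the two value filters, while MUST_PASS_ALL intersects them, here to zero rows, since one column cannot equal two different values at once. A small sketch of composing a PageFilter with a value filter in one FilterList (the family, qualifier, and value bytes are illustrative assumptions):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CombinedFilterSketch {
    static Scan buildScan() {
        byte[] family = Bytes.toBytes("cf");   // illustrative family
        byte[] qualifier = Bytes.toBytes("q"); // illustrative qualifier
        // MUST_PASS_ALL: a row must match the value filter AND fall within the page.
        FilterList filters = new FilterList(Operator.MUST_PASS_ALL,
                new SingleColumnValueFilter(family, qualifier, CompareOperator.EQUAL, Bytes.toBytes("v1")),
                new PageFilter(10));
        return new Scan().setReversed(true).setFilter(filters);
    }
}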

Aggregations

PageFilter (org.apache.hadoop.hbase.filter.PageFilter): 18
Scan (org.apache.hadoop.hbase.client.Scan): 14
IOException (java.io.IOException): 9
Result (org.apache.hadoop.hbase.client.Result): 8
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 8
FilterList (org.apache.hadoop.hbase.filter.FilterList): 6
Table (org.apache.hadoop.hbase.client.Table): 5
ArrayList (java.util.ArrayList): 4
Cell (org.apache.hadoop.hbase.Cell): 4
HashMap (java.util.HashMap): 3
AtlasException (org.apache.atlas.AtlasException): 3
Filter (org.apache.hadoop.hbase.filter.Filter): 3
ByteArrayByteIterator (com.yahoo.ycsb.ByteArrayByteIterator): 2
ByteIterator (com.yahoo.ycsb.ByteIterator): 2
EntityAuditEvent (org.apache.atlas.EntityAuditEvent): 2
KeyValue (org.apache.hadoop.hbase.KeyValue): 2
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 2
TimelineReaderContext (org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext): 2
TimelineFilterList (org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList): 2
DistinctPrefixFilter (org.apache.phoenix.filter.DistinctPrefixFilter): 2