Example 51 with FilterList

use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.

the class TestReversibleScanners method testReversibleRegionScanner.

@Test
public void testReversibleRegionScanner() throws IOException {
    byte[] FAMILYNAME2 = Bytes.toBytes("testCf2");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())).addFamily(new HColumnDescriptor(FAMILYNAME)).addFamily(new HColumnDescriptor(FAMILYNAME2));
    Region region = TEST_UTIL.createLocalHRegion(htd, null, null);
    loadDataToRegion(region, FAMILYNAME2);
    // verify row count with forward scan
    Scan scan = new Scan();
    InternalScanner scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, true);
    // Case1:Full reversed scan
    scan.setReversed(true);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, false);
    // Case2:Full reversed scan with one family
    scan = new Scan();
    scan.setReversed(true);
    scan.addFamily(FAMILYNAME);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE, ROWSIZE, false);
    // Case3:Specify qualifiers + One family
    byte[][] specifiedQualifiers = { QUALS[1], QUALS[2] };
    for (byte[] specifiedQualifier : specifiedQualifiers) scan.addColumn(FAMILYNAME, specifiedQualifier);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2, ROWSIZE, false);
    // Case4:Specify qualifiers + Two families
    for (byte[] specifiedQualifier : specifiedQualifiers) scan.addColumn(FAMILYNAME2, specifiedQualifier);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2 * 2, ROWSIZE, false);
    // Case5: Case4 + specify start row
    int startRowNum = ROWSIZE * 3 / 4;
    scan.setStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum + 1) * 2 * 2, (startRowNum + 1), false);
    // Case6: Case4 + specify stop row
    int stopRowNum = ROWSIZE / 4;
    scan.setStartRow(HConstants.EMPTY_BYTE_ARRAY);
    scan.setStopRow(ROWS[stopRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (ROWSIZE - stopRowNum - 1) * 2 * 2, (ROWSIZE - stopRowNum - 1), false);
    // Case7: Case4 + specify start row + specify stop row
    scan.setStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum - stopRowNum) * 2 * 2, (startRowNum - stopRowNum), false);
    // Case8: Case7 + SingleColumnValueFilter
    int valueNum = startRowNum % VALUESIZE;
    Filter filter = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[valueNum]);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int unfilteredRowNum = (startRowNum - stopRowNum) / VALUESIZE + (stopRowNum / VALUESIZE == valueNum ? 0 : 1);
    verifyCountAndOrder(scanner, unfilteredRowNum * 2 * 2, unfilteredRowNum, false);
    // Case9: Case7 + PageFilter
    int pageSize = 10;
    filter = new PageFilter(pageSize);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int expectedRowNum = pageSize;
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case10: Case7 + FilterList + MUST_PASS_ONE
    SingleColumnValueFilter scvFilter1 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[0]);
    SingleColumnValueFilter scvFilter2 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[1]);
    expectedRowNum = 0;
    for (int i = startRowNum; i > stopRowNum; i--) {
        if (i % VALUESIZE == 0 || i % VALUESIZE == 1) {
            expectedRowNum++;
        }
    }
    filter = new FilterList(Operator.MUST_PASS_ONE, scvFilter1, scvFilter2);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case11: Case7 + FilterList + MUST_PASS_ALL
    filter = new FilterList(Operator.MUST_PASS_ALL, scvFilter1, scvFilter2);
    expectedRowNum = 0;
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), Scan(org.apache.hadoop.hbase.client.Scan), Filter(org.apache.hadoop.hbase.filter.Filter), FilterList(org.apache.hadoop.hbase.filter.FilterList), PageFilter(org.apache.hadoop.hbase.filter.PageFilter), SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter), Test(org.junit.Test)
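
A minimal standalone sketch of the MUST_PASS_ONE vs. MUST_PASS_ALL distinction exercised by Cases 10 and 11 above (the class, method, and values here are illustrative, not part of the HBase test):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListOperatorSketch {

    public static Scan buildReversedScan(byte[] family, byte[] qualifier) {
        // Two contradictory equality filters on the same column.
        SingleColumnValueFilter matchesA =
                new SingleColumnValueFilter(family, qualifier, CompareOp.EQUAL, Bytes.toBytes("a"));
        SingleColumnValueFilter matchesB =
                new SingleColumnValueFilter(family, qualifier, CompareOp.EQUAL, Bytes.toBytes("b"));
        // MUST_PASS_ONE is a logical OR: a row passes if either filter accepts it.
        FilterList either = new FilterList(Operator.MUST_PASS_ONE, matchesA, matchesB);
        // MUST_PASS_ALL is a logical AND: these two EQUAL filters can never both
        // match the same cell, which is why Case 11 above expects zero rows.
        FilterList both = new FilterList(Operator.MUST_PASS_ALL, matchesA, matchesB);
        Scan scan = new Scan();
        scan.setReversed(true);
        // Swap in `both` to reproduce the empty result of Case 11.
        scan.setFilter(either);
        return scan;
    }
}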

Example 52 with FilterList

use of org.apache.hadoop.hbase.filter.FilterList in project titan by thinkaurelius.

the class HBaseKeyColumnValueStore method getFilter.

public static Filter getFilter(SliceQuery query) {
    byte[] colStartBytes = query.getSliceStart().length() > 0 ? query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY) : null;
    byte[] colEndBytes = query.getSliceEnd().length() > 0 ? query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY) : null;
    Filter filter = new ColumnRangeFilter(colStartBytes, true, colEndBytes, false);
    if (query.hasLimit()) {
        filter = new FilterList(FilterList.Operator.MUST_PASS_ALL, filter, new ColumnPaginationFilter(query.getLimit(), 0));
    }
    logger.debug("Generated HBase Filter {}", filter);
    return filter;
}
Also used : Filter(org.apache.hadoop.hbase.filter.Filter), FilterList(org.apache.hadoop.hbase.filter.FilterList), ColumnRangeFilter(org.apache.hadoop.hbase.filter.ColumnRangeFilter), ColumnPaginationFilter(org.apache.hadoop.hbase.filter.ColumnPaginationFilter)
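
The pattern above, a ColumnRangeFilter for the slice bounds plus a ColumnPaginationFilter for the limit, combined under MUST_PASS_ALL, is the standard way to express a bounded column slice in HBase. A minimal sketch under the same assumptions (the class name, method name, and parameters are illustrative):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;

public class SliceScanSketch {

    public static Scan sliceScan(byte[] sliceStart, byte[] sliceEnd, int limit) {
        // Half-open column range [sliceStart, sliceEnd): start inclusive (true),
        // end exclusive (false), matching the getFilter() method above.
        Filter range = new ColumnRangeFilter(sliceStart, true, sliceEnd, false);
        // Return at most `limit` columns per row, starting at column offset 0.
        Filter page = new ColumnPaginationFilter(limit, 0);
        Scan scan = new Scan();
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, range, page));
        return scan;
    }
}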

Example 53 with FilterList

use of org.apache.hadoop.hbase.filter.FilterList in project pinpoint by naver.

the class HbaseApplicationTraceIndexDao method makeResponseTimeFilter.

/**
     * Makes the HBase filter that selects values on the y-axis (response time) of
     * the scatter chart, so that only transactions inside the selected area are
     * returned. To use this filter, the 4-byte elapsed time must be attached as
     * the prefix of the column qualifier.
     *
     * @param area selected scatter-chart area, which supplies the response time range
     * @param offsetTransactionId transaction to resume after, or null to start from the beginning
     * @param offsetTransactionElapsed elapsed time of the offset transaction
     * @return the composed filter list
     */
private Filter makeResponseTimeFilter(final SelectedScatterArea area, final TransactionId offsetTransactionId, int offsetTransactionElapsed) {
    // filter by response time
    ResponseTimeRange responseTimeRange = area.getResponseTimeRange();
    byte[] responseFrom = Bytes.toBytes(responseTimeRange.getFrom());
    byte[] responseTo = Bytes.toBytes(responseTimeRange.getTo());
    FilterList filterList = new FilterList(Operator.MUST_PASS_ALL);
    filterList.addFilter(new QualifierFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(responseFrom)));
    filterList.addFilter(new QualifierFilter(CompareOp.LESS_OR_EQUAL, new BinaryPrefixComparator(responseTo)));
    // add offset
    if (offsetTransactionId != null) {
        final Buffer buffer = new AutomaticBuffer(32);
        buffer.putInt(offsetTransactionElapsed);
        buffer.putPrefixedString(offsetTransactionId.getAgentId());
        buffer.putSVLong(offsetTransactionId.getAgentStartTime());
        buffer.putVLong(offsetTransactionId.getTransactionSequence());
        byte[] qualifierOffset = buffer.getBuffer();
        filterList.addFilter(new QualifierFilter(CompareOp.GREATER, new BinaryPrefixComparator(qualifierOffset)));
    }
    return filterList;
}
Also used : Buffer(com.navercorp.pinpoint.common.buffer.Buffer), AutomaticBuffer(com.navercorp.pinpoint.common.buffer.AutomaticBuffer), ResponseTimeRange(com.navercorp.pinpoint.web.vo.ResponseTimeRange), BinaryPrefixComparator(org.apache.hadoop.hbase.filter.BinaryPrefixComparator), FilterList(org.apache.hadoop.hbase.filter.FilterList), QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter)
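
The 4-byte elapsed-time prefix mentioned in the javadoc is what makes this range check possible: BinaryPrefixComparator compares only the leading bytes of each qualifier, so two QualifierFilters under MUST_PASS_ALL bracket a closed range on that prefix. A minimal sketch assuming the elapsed time is encoded as a 4-byte big-endian int (the class and method names are illustrative):

import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ElapsedTimeRangeSketch {

    public static FilterList elapsedTimeRange(int from, int to) {
        // Bytes.toBytes(int) yields 4 big-endian bytes, the same width as the
        // elapsed-time prefix of the column qualifier. The bytewise comparison
        // matches numeric order for non-negative values such as elapsed times.
        FilterList filterList = new FilterList(Operator.MUST_PASS_ALL);
        // Keep qualifiers whose 4-byte prefix is >= from ...
        filterList.addFilter(new QualifierFilter(CompareOp.GREATER_OR_EQUAL,
                new BinaryPrefixComparator(Bytes.toBytes(from))));
        // ... and <= to, i.e. the closed range [from, to].
        filterList.addFilter(new QualifierFilter(CompareOp.LESS_OR_EQUAL,
                new BinaryPrefixComparator(Bytes.toBytes(to))));
        return filterList;
    }
}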

Example 54 with FilterList

use of org.apache.hadoop.hbase.filter.FilterList in project camel by apache.

the class HBaseProducer method scanCells.

/**
     * Performs an HBase {@link Scan} over a range of rows, using a collection of
     * cell models (family/column pairs) to select which columns to fetch.
     * The result contains the most recent entry for each selected column.
     */
private List<HBaseRow> scanCells(Table table, HBaseRow model, String start, String stop, Integer maxRowScan, List<Filter> filters) throws Exception {
    List<HBaseRow> rowSet = new LinkedList<>();
    HBaseRow startRow = new HBaseRow(model.getCells());
    startRow.setId(start);
    Scan scan;
    if (start != null) {
        scan = new Scan(Bytes.toBytes(start));
    } else {
        scan = new Scan();
    }
    if (ObjectHelper.isNotEmpty(stop)) {
        scan.setStopRow(Bytes.toBytes(stop));
    }
    if (filters != null && !filters.isEmpty()) {
        // Combine every model-aware filter into a single MUST_PASS_ALL list;
        // calling scan.setFilter(...) inside the loop would keep only the last filter.
        FilterList allFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
        for (Filter filter : filters) {
            ((ModelAwareFilter<?>) filter).apply(endpoint.getCamelContext(), model);
            allFilters.addFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, ((ModelAwareFilter<?>) filter).getFilteredList()));
        }
        scan.setFilter(allFilters);
    }
    Set<HBaseCell> cellModels = model.getCells();
    for (HBaseCell cellModel : cellModels) {
        String family = cellModel.getFamily();
        String column = cellModel.getQualifier();
        if (ObjectHelper.isNotEmpty(family) && ObjectHelper.isNotEmpty(column)) {
            scan.addColumn(HBaseHelper.getHBaseFieldAsBytes(family), HBaseHelper.getHBaseFieldAsBytes(column));
        }
    }
    ResultScanner resultScanner = table.getScanner(scan);
    int count = 0;
    Result result = resultScanner.next();
    while (result != null && count < maxRowScan) {
        HBaseRow resultRow = new HBaseRow();
        resultRow.setId(endpoint.getCamelContext().getTypeConverter().convertTo(model.getRowType(), result.getRow()));
        resultRow.setTimestamp(result.rawCells()[0].getTimestamp());
        cellModels = model.getCells();
        for (HBaseCell modelCell : cellModels) {
            HBaseCell resultCell = new HBaseCell();
            String family = modelCell.getFamily();
            String column = modelCell.getQualifier();
            resultCell.setValue(endpoint.getCamelContext().getTypeConverter().convertTo(modelCell.getValueType(), result.getValue(HBaseHelper.getHBaseFieldAsBytes(family), HBaseHelper.getHBaseFieldAsBytes(column))));
            resultCell.setFamily(modelCell.getFamily());
            resultCell.setQualifier(modelCell.getQualifier());
            Cell cell = result.getColumnLatestCell(HBaseHelper.getHBaseFieldAsBytes(family), HBaseHelper.getHBaseFieldAsBytes(column));
            if (cell != null) {
                resultCell.setTimestamp(cell.getTimestamp());
            }
            resultRow.getCells().add(resultCell);
        }
        rowSet.add(resultRow);
        count++;
        result = resultScanner.next();
    }
    return rowSet;
}
Also used : LinkedList(java.util.LinkedList), ModelAwareFilter(org.apache.camel.component.hbase.filters.ModelAwareFilter), HBaseCell(org.apache.camel.component.hbase.model.HBaseCell), HBaseRow(org.apache.camel.component.hbase.model.HBaseRow), Cell(org.apache.hadoop.hbase.Cell), Result(org.apache.hadoop.hbase.client.Result), ResultScanner(org.apache.hadoop.hbase.client.ResultScanner), Scan(org.apache.hadoop.hbase.client.Scan), FilterList(org.apache.hadoop.hbase.filter.FilterList)
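
One detail worth flagging in scanCells() above: the ResultScanner is never closed, which can leak server-side scanner resources. A minimal sketch of the same loop shape with try-with-resources so the scanner is always released (the class and method names are illustrative, not part of the Camel component):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class ScanLoopSketch {

    // Reads at most maxRowScan rows and always releases the scanner.
    public static int countRows(Table table, Scan scan, int maxRowScan) throws IOException {
        int count = 0;
        // ResultScanner is Closeable; try-with-resources closes it even on error.
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                count++;
                if (count >= maxRowScan) {
                    break;
                }
            }
        }
        return count;
    }
}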

Example 55 with FilterList

use of org.apache.hadoop.hbase.filter.FilterList in project incubator-atlas by apache.

the class HBaseKeyColumnValueStore method getFilter.

public static Filter getFilter(SliceQuery query) {
    byte[] colStartBytes = query.getSliceStart().length() > 0 ? query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY) : null;
    byte[] colEndBytes = query.getSliceEnd().length() > 0 ? query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY) : null;
    Filter filter = new ColumnRangeFilter(colStartBytes, true, colEndBytes, false);
    if (query.hasLimit()) {
        filter = new FilterList(FilterList.Operator.MUST_PASS_ALL, filter, new ColumnPaginationFilter(query.getLimit(), 0));
    }
    logger.debug("Generated HBase Filter {}", filter);
    return filter;
}
Also used : Filter(org.apache.hadoop.hbase.filter.Filter), FilterList(org.apache.hadoop.hbase.filter.FilterList), ColumnRangeFilter(org.apache.hadoop.hbase.filter.ColumnRangeFilter), ColumnPaginationFilter(org.apache.hadoop.hbase.filter.ColumnPaginationFilter)

Aggregations

FilterList (org.apache.hadoop.hbase.filter.FilterList): 68
Filter (org.apache.hadoop.hbase.filter.Filter): 36
Scan (org.apache.hadoop.hbase.client.Scan): 16
QualifierFilter (org.apache.hadoop.hbase.filter.QualifierFilter): 10
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 10
TimelineFilterList (org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList): 10
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 9
Test (org.junit.Test): 8
ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 7
ArrayList (java.util.ArrayList): 7
FamilyFilter (org.apache.hadoop.hbase.filter.FamilyFilter): 7
Transaction (org.apache.tephra.Transaction): 7
IOException (java.io.IOException): 6
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter): 6
Result (org.apache.hadoop.hbase.client.Result): 5
PageFilter (org.apache.hadoop.hbase.filter.PageFilter): 5
Cell (org.apache.hadoop.hbase.Cell): 4
TableName (org.apache.hadoop.hbase.TableName): 4
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 4
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 4