Example 21 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.

From the class TestHRegion, method testIndexesScanWithOneDeletedRow:

@Test
public void testIndexesScanWithOneDeletedRow() throws IOException {
    byte[] family = Bytes.toBytes("family");
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, family);
    try {
        Put put = new Put(Bytes.toBytes(1L));
        put.addColumn(family, qual1, 1L, Bytes.toBytes(1L));
        region.put(put);
        region.flush(true);
        Delete delete = new Delete(Bytes.toBytes(1L), 1L);
        region.delete(delete);
        put = new Put(Bytes.toBytes(2L));
        put.addColumn(family, qual1, 2L, Bytes.toBytes(2L));
        region.put(put);
        Scan idxScan = new Scan();
        idxScan.addFamily(family);
        idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.<Filter>asList(
                new SingleColumnValueFilter(family, qual1, CompareOp.GREATER_OR_EQUAL,
                        new BinaryComparator(Bytes.toBytes(0L))),
                new SingleColumnValueFilter(family, qual1, CompareOp.LESS_OR_EQUAL,
                        new BinaryComparator(Bytes.toBytes(3L))))));
        InternalScanner scanner = region.getScanner(idxScan);
        List<Cell> res = new ArrayList<>();
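        // Drain the scanner: next(res) appends matching cells to res and returns false once exhausted.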
        while (scanner.next(res)) ;
        assertEquals(1L, res.size());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter), ColumnCountGetFilter (org.apache.hadoop.hbase.filter.ColumnCountGetFilter), SingleColumnValueExcludeFilter (org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter), Filter (org.apache.hadoop.hbase.filter.Filter), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), FilterList (org.apache.hadoop.hbase.filter.FilterList), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), Test (org.junit.Test)
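Taken together, the two SingleColumnValueFilters under MUST_PASS_ALL form a closed value range on qual1 (values between 0 and 3 inclusive). A minimal sketch of the same idiom in isolation, using only classes listed above (the helper name is mine, not part of the test):

// Sketch: AND a lower and an upper bound on one column's value.
private static Scan buildValueRangeScan(byte[] family, byte[] qualifier, long low, long high) {
    Scan scan = new Scan();
    scan.addFamily(family);
    // MUST_PASS_ALL gives AND semantics: a cell passes only if every filter accepts it.
    scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.<Filter>asList(
            new SingleColumnValueFilter(family, qualifier, CompareOp.GREATER_OR_EQUAL,
                    new BinaryComparator(Bytes.toBytes(low))),
            new SingleColumnValueFilter(family, qualifier, CompareOp.LESS_OR_EQUAL,
                    new BinaryComparator(Bytes.toBytes(high))))));
    return scan;
}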

Example 22 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project pinpoint by naver.

From the class HbaseAgentEventDao, method getAgentEvents:

@Override
public List<AgentEventBo> getAgentEvents(String agentId, Range range, Set<AgentEventType> excludeEventTypes) {
    if (agentId == null) {
        throw new NullPointerException("agentId must not be null");
    }
    if (range == null) {
        throw new NullPointerException("range must not be null");
    }
    Scan scan = new Scan();
    scan.setMaxVersions(1);
    scan.setCaching(SCANNER_CACHE_SIZE);
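    // The scan runs from range.getTo() down to range.getFrom(), which suggests the row key embeds a reversed timestamp.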
    scan.setStartRow(createRowKey(agentId, range.getTo()));
    scan.setStopRow(createRowKey(agentId, range.getFrom()));
    scan.addFamily(HBaseTables.AGENT_EVENT_CF_EVENTS);
    if (!CollectionUtils.isEmpty(excludeEventTypes)) {
        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
        for (AgentEventType excludeEventType : excludeEventTypes) {
            byte[] excludeQualifier = Bytes.toBytes(excludeEventType.getCode());
            filterList.addFilter(new QualifierFilter(CompareFilter.CompareOp.NOT_EQUAL, new BinaryComparator(excludeQualifier)));
        }
        scan.setFilter(filterList);
    }
    List<AgentEventBo> agentEvents = this.hbaseOperations2.find(HBaseTables.AGENT_EVENT, scan, agentEventResultsExtractor);
    logger.debug("agentEvents found. {}", agentEvents);
    return agentEvents;
}
Also used: AgentEventType (com.navercorp.pinpoint.common.server.util.AgentEventType), AgentEventBo (com.navercorp.pinpoint.common.server.bo.AgentEventBo), Scan (org.apache.hadoop.hbase.client.Scan), FilterList (org.apache.hadoop.hbase.filter.FilterList), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), QualifierFilter (org.apache.hadoop.hbase.filter.QualifierFilter)
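Here MUST_PASS_ALL is used for exclusion rather than matching: each QualifierFilter with NOT_EQUAL rejects one qualifier, and ANDing them rejects every excluded event type at once. A minimal sketch of the pattern, assuming the qualifier codes arrive as plain ints (an assumption; the original derives them from AgentEventType):

// Sketch: build an AND list of NOT_EQUAL qualifier filters, one per excluded code.
private static FilterList excludeQualifiers(int... codes) {
    FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    for (int code : codes) {
        filterList.addFilter(new QualifierFilter(CompareFilter.CompareOp.NOT_EQUAL,
                new BinaryComparator(Bytes.toBytes(code))));
    }
    return filterList;
}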

Example 23 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

From the class PhoenixRuntimeIT, method getUserTableAndViewsFilter:

private static Filter getUserTableAndViewsFilter() {
    SingleColumnValueFilter tableFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_TYPE_BYTES, CompareOp.EQUAL, Bytes.toBytes(PTableType.TABLE.getSerializedValue()));
    tableFilter.setFilterIfMissing(true);
    SingleColumnValueFilter viewFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_TYPE_BYTES, CompareOp.EQUAL, Bytes.toBytes(PTableType.VIEW.getSerializedValue()));
    viewFilter.setFilterIfMissing(true);
    FilterList filter = new FilterList(FilterList.Operator.MUST_PASS_ONE, Arrays.asList(new Filter[] { tableFilter, viewFilter }));
    return filter;
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), Filter (org.apache.hadoop.hbase.filter.Filter), FilterList (org.apache.hadoop.hbase.filter.FilterList)
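MUST_PASS_ONE gives OR semantics: a row passes if its table type is TABLE or VIEW. The setFilterIfMissing(true) calls matter here, because a SingleColumnValueFilter lets rows through by default when the tested column is absent, which would make the OR pass vacuously. A minimal sketch of the same shape with hypothetical column and value bytes:

// Sketch: OR two equality tests on one column, dropping rows that lack the column.
private static Filter matchesEitherValue(byte[] fam, byte[] qual, byte[] v1, byte[] v2) {
    SingleColumnValueFilter f1 = new SingleColumnValueFilter(fam, qual, CompareOp.EQUAL, v1);
    f1.setFilterIfMissing(true);
    SingleColumnValueFilter f2 = new SingleColumnValueFilter(fam, qual, CompareOp.EQUAL, v2);
    f2.setFilterIfMissing(true);
    return new FilterList(FilterList.Operator.MUST_PASS_ONE, Arrays.<Filter>asList(f1, f2));
}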

Example 24 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

From the class ScanUtil, method intersectScanRange:

public static boolean intersectScanRange(Scan scan, byte[] startKey, byte[] stopKey, boolean useSkipScan) {
    boolean mayHaveRows = false;
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        offset = startKey.length != 0 ? startKey.length : stopKey.length;
    }
    byte[] existingStartKey = scan.getStartRow();
    byte[] existingStopKey = scan.getStopRow();
    if (existingStartKey.length > 0) {
        if (startKey.length == 0 || Bytes.compareTo(existingStartKey, startKey) > 0) {
            startKey = existingStartKey;
        }
    } else {
        mayHaveRows = true;
    }
    if (existingStopKey.length > 0) {
        if (stopKey.length == 0 || Bytes.compareTo(existingStopKey, stopKey) < 0) {
            stopKey = existingStopKey;
        }
    } else {
        mayHaveRows = true;
    }
    scan.setStartRow(startKey);
    scan.setStopRow(stopKey);
    if (offset > 0 && useSkipScan) {
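        // Strip the local-index prefix of length 'offset' from both keys; the skip scan
        // filter intersected below operates on the unprefixed key space.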
        byte[] temp = null;
        if (startKey.length != 0) {
            temp = new byte[startKey.length - offset];
            System.arraycopy(startKey, offset, temp, 0, startKey.length - offset);
            startKey = temp;
        }
        if (stopKey.length != 0) {
            temp = new byte[stopKey.length - offset];
            System.arraycopy(stopKey, offset, temp, 0, stopKey.length - offset);
            stopKey = temp;
        }
    }
    mayHaveRows = mayHaveRows || Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) < 0;
    // If the scan is using skip scan filter, intersect and replace the filter.
    if (mayHaveRows && useSkipScan) {
        Filter filter = scan.getFilter();
        if (filter instanceof SkipScanFilter) {
            SkipScanFilter oldFilter = (SkipScanFilter) filter;
            SkipScanFilter newFilter = oldFilter.intersect(startKey, stopKey);
            if (newFilter == null) {
                return false;
            }
            // Intersect found: replace skip scan with intersected one
            scan.setFilter(newFilter);
        } else if (filter instanceof FilterList) {
            FilterList oldList = (FilterList) filter;
            FilterList newList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
            for (Filter f : oldList.getFilters()) {
                if (f instanceof SkipScanFilter) {
                    SkipScanFilter newFilter = ((SkipScanFilter) f).intersect(startKey, stopKey);
                    if (newFilter == null) {
                        return false;
                    }
                    newList.addFilter(newFilter);
                } else {
                    newList.addFilter(f);
                }
            }
            scan.setFilter(newList);
        }
    }
    return mayHaveRows;
}
Also used: SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), DistinctPrefixFilter (org.apache.phoenix.filter.DistinctPrefixFilter), Filter (org.apache.hadoop.hbase.filter.Filter), BooleanExpressionFilter (org.apache.phoenix.filter.BooleanExpressionFilter), MultiEncodedCQKeyValueComparisonFilter (org.apache.phoenix.filter.MultiEncodedCQKeyValueComparisonFilter), FilterList (org.apache.hadoop.hbase.filter.FilterList)
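The FilterList branch rebuilds the list rather than editing it in place, swapping each SkipScanFilter for its intersection and bailing out if any intersection is empty; note that the rebuilt list hard-codes MUST_PASS_ALL instead of copying the old list's operator. A minimal sketch of the rebuild pattern (the helper and its UnaryOperator parameter are mine, assuming Java 8):

// Sketch: rebuild a FilterList while transforming each member,
// preserving the original operator via getOperator().
private static FilterList rewriteMembers(FilterList oldList,
        java.util.function.UnaryOperator<Filter> fn) {
    FilterList newList = new FilterList(oldList.getOperator());
    for (Filter f : oldList.getFilters()) {
        newList.addFilter(fn.apply(f));
    }
    return newList;
}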

Example 25 with FilterList

Use of org.apache.hadoop.hbase.filter.FilterList in project phoenix by apache.

From the class ScannerBuilder, method getColumnFilters:

/**
 * @param columns columns to filter
 * @return a filter that skips any {@link KeyValue} that does not match one of the passed columns
 */
private Filter getColumnFilters(Collection<? extends ColumnReference> columns) {
    // each column needs to be added as an OR, so we need to separate them out
    FilterList columnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    // create a filter that matches each column reference
    for (ColumnReference ref : columns) {
        Filter columnFilter = new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(ref.getFamily()));
        // combine with a match for the qualifier, if the qualifier is a specific qualifier
        if (!Bytes.equals(ColumnReference.ALL_QUALIFIERS, ref.getQualifier())) {
            columnFilter = new FilterList(columnFilter, new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(ref.getQualifier())));
        }
        columnFilters.addFilter(columnFilter);
    }
    return columnFilters;
}
Also used: ColumnTrackingNextLargestTimestampFilter (org.apache.phoenix.hbase.index.covered.filter.ColumnTrackingNextLargestTimestampFilter), FamilyFilter (org.apache.hadoop.hbase.filter.FamilyFilter), QualifierFilter (org.apache.hadoop.hbase.filter.QualifierFilter), Filter (org.apache.hadoop.hbase.filter.Filter), ApplyAndFilterDeletesFilter (org.apache.phoenix.hbase.index.covered.filter.ApplyAndFilterDeletesFilter), FilterList (org.apache.hadoop.hbase.filter.FilterList), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
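The nesting is the key move: the inner FilterList uses the varargs constructor, which defaults to MUST_PASS_ALL, so family AND qualifier must both match inside a branch, while the outer MUST_PASS_ONE list ORs the per-column branches together. A minimal usage sketch with hypothetical family and qualifier names:

// Sketch: one AND branch (family AND qualifier) attached to a scan.
// The varargs FilterList constructor defaults to Operator.MUST_PASS_ALL.
Scan scan = new Scan();
scan.setFilter(new FilterList(
        new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("cf"))),
        new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("q1")))));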

Aggregations

FilterList (org.apache.hadoop.hbase.filter.FilterList): 64
Filter (org.apache.hadoop.hbase.filter.Filter): 32
Scan (org.apache.hadoop.hbase.client.Scan): 16
QualifierFilter (org.apache.hadoop.hbase.filter.QualifierFilter): 10
TimelineFilterList (org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList): 10
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 9
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 8
Test (org.junit.Test): 8
ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 7
FamilyFilter (org.apache.hadoop.hbase.filter.FamilyFilter): 7
Transaction (org.apache.tephra.Transaction): 7
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter): 6
IOException (java.io.IOException): 5
ArrayList (java.util.ArrayList): 5
Result (org.apache.hadoop.hbase.client.Result): 5
PageFilter (org.apache.hadoop.hbase.filter.PageFilter): 5
Cell (org.apache.hadoop.hbase.Cell): 4
TableName (org.apache.hadoop.hbase.TableName): 4
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 4
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 4