Search in sources :

Example 21 with BinaryComparator

use of org.apache.hadoop.hbase.filter.BinaryComparator in project phoenix by apache.

From the class FirstLastValueServerAggregator: the method aggregate.

@Override
public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) {
    // Evaluate the ORDER BY expression first; its serialized bytes key the
    // topValues map and drive the keep/evict decision below.
    orderByColumn.evaluate(tuple, ptr);
    byte[] currentOrder = ptr.copyBytes();
    // No data value for this row -> nothing to aggregate.
    if (!dataColumn.evaluate(tuple, ptr)) {
        return;
    }
    if (useOffset) {
        // Maintain a bounded window of the `offset` best-ordered values.
        boolean addFlag = false;
        if (topValuesCount < offset) {
            // Window not yet full: always accept. (The previous try/catch
            // around this assignment was dead code — a plain boolean
            // assignment cannot throw — so it has been removed.)
            addFlag = true;
        } else {
            // Window full: evict the worst entry if the new row orders better.
            if (isAscending) {
                if (removeLastElement(currentOrder, topValues.lastKey(), -1)) {
                    addFlag = true;
                    topValuesCount--;
                }
            } else {
                if (removeLastElement(currentOrder, topValues.firstKey(), 1)) {
                    addFlag = true;
                    topValuesCount--;
                }
            }
        }
        if (addFlag) {
            topValuesCount++;
            // Single lookup instead of containsKey + two get() calls.
            LinkedList<byte[]> bucket = topValues.get(currentOrder);
            if (bucket == null) {
                bucket = new LinkedList<byte[]>();
                topValues.put(currentOrder, bucket);
            }
            // Invert bytes if the value column has a DESC sort order so the
            // stored representation is the natural (ASC) encoding.
            if (hasValueDescSortOrder) {
                bucket.push(SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength()));
            } else {
                bucket.push(ptr.copyBytes());
            }
        }
    } else {
        // No offset: track only the single best-ordered value.
        boolean isHigher;
        if (isAscending) {
            isHigher = topOrder.compareTo(currentOrder) > 0;
        } else {
            // desc
            isHigher = topOrder.compareTo(currentOrder) < 0;
        }
        // Empty topOrder means no value has been seen yet.
        if (topOrder.getValue().length < 1 || isHigher) {
            if (hasValueDescSortOrder) {
                topValue = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                topValue = ptr.copyBytes();
            }
            topOrder = new BinaryComparator(currentOrder);
        }
    }
}
Also used : IOException(java.io.IOException) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator)

Example 22 with BinaryComparator

use of org.apache.hadoop.hbase.filter.BinaryComparator in project phoenix by apache.

From the class ScannerBuilder: the method getColumnFilters.

/**
 * Builds a filter that skips any {@link KeyValue} that does not belong to one
 * of the passed column references (OR semantics across the columns).
 *
 * @param columns columns to filter
 * @return filter passing a cell when it matches at least one of the columns
 */
private Filter getColumnFilters(Collection<? extends ColumnReference> columns) {
    // Each column is an alternative, so the per-column filters are OR'ed.
    FilterList allColumns = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    for (ColumnReference column : columns) {
        Filter matcher = new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(column.getFamily()));
        boolean hasSpecificQualifier =
            !Bytes.equals(ColumnReference.ALL_QUALIFIERS, column.getQualifier());
        if (hasSpecificQualifier) {
            // AND the family match with an exact qualifier match.
            matcher = new FilterList(matcher,
                new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(column.getQualifier())));
        }
        allColumns.addFilter(matcher);
    }
    return allColumns;
}
Also used : ColumnTrackingNextLargestTimestampFilter(org.apache.phoenix.hbase.index.covered.filter.ColumnTrackingNextLargestTimestampFilter) FamilyFilter(org.apache.hadoop.hbase.filter.FamilyFilter) QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter) Filter(org.apache.hadoop.hbase.filter.Filter) ApplyAndFilterDeletesFilter(org.apache.phoenix.hbase.index.covered.filter.ApplyAndFilterDeletesFilter) FilterList(org.apache.hadoop.hbase.filter.FilterList) FamilyFilter(org.apache.hadoop.hbase.filter.FamilyFilter) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter)

Example 23 with BinaryComparator

use of org.apache.hadoop.hbase.filter.BinaryComparator in project hadoop by apache.

From the class FlowRunEntityReader: the method constructFilterListBasedOnFields.

@Override
@Override
protected FilterList constructFilterListBasedOnFields() throws IOException {
    // Top-level list uses OR semantics; exactly one branch below contributes
    // a sub-list, so the OR operator is effectively a pass-through here.
    FilterList list = new FilterList(Operator.MUST_PASS_ONE);
    // By default fetch everything in INFO column family.
    FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(FlowRunColumnFamily.INFO.getBytes()));
    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
    // Metrics are always returned if we are reading a single entity.
    if (!isSingleEntityRead() && !hasField(dataToRetrieve.getFieldsToRetrieve(), Field.METRICS)) {
        // Multi-entity read without METRICS requested: keep the INFO family
        // but drop every column whose qualifier carries the metric prefix.
        FilterList infoColFamilyList = new FilterList(Operator.MUST_PASS_ONE);
        infoColFamilyList.addFilter(infoColumnFamily);
        infoColFamilyList.addFilter(new QualifierFilter(CompareOp.NOT_EQUAL, new BinaryPrefixComparator(FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(""))));
        list.addFilter(infoColFamilyList);
    } else {
        // Check if metricsToRetrieve are specified and if they are, create a
        // filter list for info column family by adding flow run tables columns
        // and a list for metrics to retrieve. Pls note that fieldsToRetrieve
        // will have METRICS added to it if metricsToRetrieve are specified
        // (in augmentParams()).
        TimelineFilterList metricsToRetrieve = dataToRetrieve.getMetricsToRetrieve();
        if (metricsToRetrieve != null && !metricsToRetrieve.getFilterList().isEmpty()) {
            // Default FilterList operator is MUST_PASS_ALL: the cell must be
            // in the INFO family AND match one of the column filters below.
            FilterList infoColFamilyList = new FilterList();
            infoColFamilyList.addFilter(infoColumnFamily);
            FilterList columnsList = updateFixedColumns();
            columnsList.addFilter(TimelineFilterUtils.createHBaseFilterList(FlowRunColumnPrefix.METRIC, metricsToRetrieve));
            infoColFamilyList.addFilter(columnsList);
            list.addFilter(infoColFamilyList);
        }
        // NOTE(review): if metricsToRetrieve is null/empty this returns an
        // empty MUST_PASS_ONE list — presumably intentional (no restriction
        // is added); confirm against callers before changing.
    }
    return list;
}
Also used : BinaryPrefixComparator(org.apache.hadoop.hbase.filter.BinaryPrefixComparator) TimelineFilterList(org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList) FilterList(org.apache.hadoop.hbase.filter.FilterList) TimelineFilterList(org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList) FamilyFilter(org.apache.hadoop.hbase.filter.FamilyFilter) TimelineDataToRetrieve(org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter)

Example 24 with BinaryComparator

use of org.apache.hadoop.hbase.filter.BinaryComparator in project hadoop by apache.

From the class GenericEntityReader: the method constructFilterListBasedOnFields.

@Override
@Override
protected FilterList constructFilterListBasedOnFields() throws IOException {
    // When every column is needed, returning null tells the caller to fetch
    // everything without a filter.
    if (!needCreateFilterListBasedOnFields()) {
        return null;
    }
    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
    // Restrict to the INFO column family by default; refinements are AND-ed
    // onto this list (default FilterList operator is MUST_PASS_ALL).
    FilterList infoFamilyFilters = new FilterList();
    infoFamilyFilters.addFilter(new FamilyFilter(CompareOp.EQUAL,
        new BinaryComparator(EntityColumnFamily.INFO.getBytes())));
    boolean partialInfoCols = !isSingleEntityRead() && fetchPartialColsFromInfoFamily();
    if (partialInfoCols) {
        // Only a subset of the info family is needed.
        infoFamilyFilters.addFilter(createFilterListForColsOfInfoFamily());
    } else {
        // Strip info-family column prefixes that the requested fields
        // do not require.
        excludeFieldsFromInfoColFamily(infoFamilyFilters);
    }
    listBasedOnFields.addFilter(infoFamilyFilters);
    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
    return listBasedOnFields;
}
Also used : FilterList(org.apache.hadoop.hbase.filter.FilterList) TimelineFilterList(org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList) FamilyFilter(org.apache.hadoop.hbase.filter.FamilyFilter) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator)

Example 25 with BinaryComparator

use of org.apache.hadoop.hbase.filter.BinaryComparator in project hbase by apache.

From the class TestRegionServerReadRequestMetrics: the method testReadRequestsCountWithFilter.

@Test
public void testReadRequestsCountWithFilter() throws Exception {
    // Value filter: rows survive only when CF1:COL1 == VAL1.
    Scan scan = new Scan();
    scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
    scanAndAssertReadRequests(scan, 2, 1);
    // Row filter over the full table: only ROW1 survives.
    scan = new Scan();
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
    scanAndAssertReadRequests(scan, 1, 2);
    // Row filter over a range that excludes ROW1: nothing survives.
    scan = new Scan(ROW2, ROW3);
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
    scanAndAssertReadRequests(scan, 0, 1);
// fixme filtered get should not increase readRequestsCount
//    Get get = new Get(ROW2);
//    get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
//    Result result = table.get(get);
//    resultCount = result.isEmpty() ? 0 : 1;
//    testReadRequests(resultCount, 0, 1);
}

/**
 * Runs the scan, counts the returned rows, and asserts the read-request
 * metrics while the scanner is still open (ordering relative to close()
 * is preserved from the original inline version).
 */
private void scanAndAssertReadRequests(Scan scan, int expectedA, int expectedB) throws Exception {
    try (ResultScanner scanner = table.getScanner(scan)) {
        int resultCount = 0;
        for (Result ignore : scanner) {
            resultCount++;
        }
        testReadRequests(resultCount, expectedA, expectedB);
    }
}
Also used : SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) Scan(org.apache.hadoop.hbase.client.Scan) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Aggregations

BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator)41 Test (org.junit.Test)18 Filter (org.apache.hadoop.hbase.filter.Filter)15 RowFilter (org.apache.hadoop.hbase.filter.RowFilter)14 Put (org.apache.hadoop.hbase.client.Put)12 SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter)12 Scan (org.apache.hadoop.hbase.client.Scan)9 QualifierFilter (org.apache.hadoop.hbase.filter.QualifierFilter)9 FilterList (org.apache.hadoop.hbase.filter.FilterList)8 FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter)7 PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter)7 ArrayList (java.util.ArrayList)6 Cell (org.apache.hadoop.hbase.Cell)5 KeyValue (org.apache.hadoop.hbase.KeyValue)5 Delete (org.apache.hadoop.hbase.client.Delete)5 FamilyFilter (org.apache.hadoop.hbase.filter.FamilyFilter)5 InclusiveStopFilter (org.apache.hadoop.hbase.filter.InclusiveStopFilter)5 RegexStringComparator (org.apache.hadoop.hbase.filter.RegexStringComparator)5 Get (org.apache.hadoop.hbase.client.Get)4 Result (org.apache.hadoop.hbase.client.Result)4