
Example 16 with SingleColumnValueFilter

Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project cdap by caskdata.

From the class HBase96QueueConsumer, method createStateFilter.

/**
 * Creates an HBase filter that filters out rows whose state column has state = PROCESSED
 * (ignoring the transaction portion of the column value).
 */
private Filter createStateFilter() {
    // The mask is all zeros except for its final byte, so the bitwise AND
    // comparison looks only at the state byte and ignores the transaction
    // data that precedes it in the column value.
    byte[] processedMask = new byte[Ints.BYTES * 2 + 1];
    processedMask[processedMask.length - 1] = ConsumerEntryState.PROCESSED.getState();
    return new SingleColumnValueFilter(QueueEntryRow.COLUMN_FAMILY, stateColumnName,
        CompareFilter.CompareOp.NOT_EQUAL,
        new BitComparator(processedMask, BitComparator.BitwiseOp.AND));
}
Also used: SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) BitComparator(org.apache.hadoop.hbase.filter.BitComparator)
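
For context, here is a minimal sketch of attaching such a state filter to a queue scan. The method name scanUnprocessed and the surrounding setup are ours, not CDAP's; it assumes an open org.apache.hadoop.hbase.client.Table for the queue table:

private void scanUnprocessed(Table table) throws IOException {
    // Sketch only: apply the state filter so rows already marked PROCESSED
    // are skipped server-side rather than shipped to the client.
    Scan scan = new Scan();
    scan.addColumn(QueueEntryRow.COLUMN_FAMILY, stateColumnName);
    scan.setFilter(createStateFilter());
    try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
            // process entries that are not yet PROCESSED
        }
    }
}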

Example 17 with SingleColumnValueFilter

Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project drill by apache.

From the class HBaseFilterBuilder, method createHBaseScanSpec.

private HBaseScanSpec createHBaseScanSpec(FunctionCall call, CompareFunctionsProcessor processor) {
    String functionName = processor.getFunctionName();
    SchemaPath field = processor.getPath();
    byte[] fieldValue = processor.getValue();
    boolean sortOrderAscending = processor.isSortOrderAscending();
    boolean isRowKey = field.getAsUnescapedPath().equals(ROW_KEY);
    if (!(isRowKey || (!field.getRootSegment().isLastPath() && field.getRootSegment().getChild().isLastPath() && field.getRootSegment().getChild().isNamed()))) {
        /*
         * If the field in this function is neither the row_key nor a qualified
         * HBase column, there is nothing to push down; return null.
         */
        return null;
    }
    if (processor.isRowKeyPrefixComparison()) {
        return createRowKeyPrefixScanSpec(call, processor);
    }
    CompareOp compareOp = null;
    boolean isNullTest = false;
    ByteArrayComparable comparator = new BinaryComparator(fieldValue);
    byte[] startRow = HConstants.EMPTY_START_ROW;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    switch(functionName) {
        case "equal":
            compareOp = CompareOp.EQUAL;
            if (isRowKey) {
                startRow = fieldValue;
                /* stopRow should be just greater than 'value'*/
                stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                compareOp = CompareOp.EQUAL;
            }
            break;
        case "not_equal":
            compareOp = CompareOp.NOT_EQUAL;
            break;
        case "greater_than_or_equal_to":
            if (sortOrderAscending) {
                compareOp = CompareOp.GREATER_OR_EQUAL;
                if (isRowKey) {
                    startRow = fieldValue;
                }
            } else {
                compareOp = CompareOp.LESS_OR_EQUAL;
                if (isRowKey) {
                    // stopRow should be just greater than 'value'
                    stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            }
            break;
        case "greater_than":
            if (sortOrderAscending) {
                compareOp = CompareOp.GREATER;
                if (isRowKey) {
                    // startRow should be just greater than 'value'
                    startRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            } else {
                compareOp = CompareOp.LESS;
                if (isRowKey) {
                    stopRow = fieldValue;
                }
            }
            break;
        case "less_than_or_equal_to":
            if (sortOrderAscending) {
                compareOp = CompareOp.LESS_OR_EQUAL;
                if (isRowKey) {
                    // stopRow should be just greater than 'value'
                    stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            } else {
                compareOp = CompareOp.GREATER_OR_EQUAL;
                if (isRowKey) {
                    startRow = fieldValue;
                }
            }
            break;
        case "less_than":
            if (sortOrderAscending) {
                compareOp = CompareOp.LESS;
                if (isRowKey) {
                    stopRow = fieldValue;
                }
            } else {
                compareOp = CompareOp.GREATER;
                if (isRowKey) {
                    // startRow should be just greater than 'value'
                    startRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            }
            break;
        case "isnull":
        case "isNull":
        case "is null":
            if (isRowKey) {
                return null;
            }
            isNullTest = true;
            compareOp = CompareOp.EQUAL;
            comparator = new NullComparator();
            break;
        case "isnotnull":
        case "isNotNull":
        case "is not null":
            if (isRowKey) {
                return null;
            }
            compareOp = CompareOp.NOT_EQUAL;
            comparator = new NullComparator();
            break;
        case "like":
            /*
       * Convert the LIKE operand to Regular Expression pattern so that we can
       * apply RegexStringComparator()
       */
            HBaseRegexParser parser = new HBaseRegexParser(call).parse();
            compareOp = CompareOp.EQUAL;
            comparator = new RegexStringComparator(parser.getRegexString());
            /*
       * We can possibly do better if the LIKE operator is on the row_key
       */
            if (isRowKey) {
                String prefix = parser.getPrefixString();
                if (prefix != null) {
                    /*
                     * If there is a literal prefix, it can help us prune the scan
                     * to a sub range.
                     */
                    if (prefix.equals(parser.getLikeString())) {
                        /* The operand value is literal. This turns the LIKE operator to EQUAL operator */
                        startRow = stopRow = fieldValue;
                        compareOp = null;
                    } else {
                        startRow = prefix.getBytes(Charsets.UTF_8);
                        stopRow = startRow.clone();
                        boolean isMaxVal = true;
                        for (int i = stopRow.length - 1; i >= 0; --i) {
                            int nextByteValue = (0xff & stopRow[i]) + 1;
                            if (nextByteValue < 0xff) {
                                stopRow[i] = (byte) nextByteValue;
                                isMaxVal = false;
                                break;
                            } else {
                                stopRow[i] = 0;
                            }
                        }
                        if (isMaxVal) {
                            stopRow = HConstants.EMPTY_END_ROW;
                        }
                    }
                }
            }
            break;
    }
    if (compareOp != null || startRow != HConstants.EMPTY_START_ROW || stopRow != HConstants.EMPTY_END_ROW) {
        Filter filter = null;
        if (isRowKey) {
            if (compareOp != null) {
                filter = new RowFilter(compareOp, comparator);
            }
        } else {
            byte[] family = HBaseUtils.getBytes(field.getRootSegment().getPath());
            byte[] qualifier = HBaseUtils.getBytes(field.getRootSegment().getChild().getNameSegment().getPath());
            filter = new SingleColumnValueFilter(family, qualifier, compareOp, comparator);
            ((SingleColumnValueFilter) filter).setLatestVersionOnly(true);
            if (!isNullTest) {
                ((SingleColumnValueFilter) filter).setFilterIfMissing(true);
            }
        }
        return new HBaseScanSpec(groupScan.getTableName(), startRow, stopRow, filter);
    }
    // else
    return null;
}
Also used: SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) NullComparator(org.apache.hadoop.hbase.filter.NullComparator) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) RegexStringComparator(org.apache.hadoop.hbase.filter.RegexStringComparator) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) SchemaPath(org.apache.drill.common.expression.SchemaPath) Filter(org.apache.hadoop.hbase.filter.Filter) CompareOp(org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
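
The prefix-increment loop in the LIKE branch deserves a closer look: it computes the smallest row key strictly greater than every key sharing the given prefix, which becomes the exclusive stop row. A standalone sketch of the same logic (the helper name incrementPrefix is ours, not Drill's):

// Returns the exclusive stop row for a prefix scan: the last byte that can
// be incremented is bumped by one and every byte after it is zeroed.
// Returns an empty array (scan to end of table) when no byte can be bumped.
static byte[] incrementPrefix(byte[] prefix) {
    byte[] stopRow = prefix.clone();
    for (int i = stopRow.length - 1; i >= 0; --i) {
        int nextByteValue = (0xff & stopRow[i]) + 1;
        if (nextByteValue < 0xff) { // 0xff itself is treated as overflow
            stopRow[i] = (byte) nextByteValue;
            return stopRow;
        }
        stopRow[i] = 0; // carry into the preceding byte
    }
    return new byte[0]; // same meaning as HConstants.EMPTY_END_ROW
}

Note that the loop treats 0xff conservatively as overflow (nextByteValue < 0xff rather than <= 0xff). That can only make the stop row larger than strictly necessary, so the scan range may be slightly wider but never too narrow, and correctness is preserved because the RegexStringComparator filter is still applied to every row in the range.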

Example 18 with SingleColumnValueFilter

Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project cxf by apache.

From the class HBaseQueryVisitor, method buildSimpleQuery.

private Filter buildSimpleQuery(ConditionType ct, String name, Object value) {
    name = super.getRealPropertyName(name);
    validatePropertyValue(name, value);
    Class<?> clazz = getPrimitiveFieldClass(name, value.getClass());
    CompareOp compareOp = null;
    boolean regexCompRequired = false;
    switch(ct) {
        case EQUALS:
            compareOp = CompareOp.EQUAL;
            regexCompRequired = String.class == clazz && value.toString().endsWith("*");
            break;
        case NOT_EQUALS:
            compareOp = CompareOp.NOT_EQUAL;
            regexCompRequired = String.class == clazz && value.toString().endsWith("*");
            break;
        case GREATER_THAN:
            compareOp = CompareOp.GREATER;
            break;
        case GREATER_OR_EQUALS:
            compareOp = CompareOp.GREATER_OR_EQUAL;
            break;
        case LESS_THAN:
            compareOp = CompareOp.LESS;
            break;
        case LESS_OR_EQUALS:
            compareOp = CompareOp.LESS_OR_EQUAL;
            break;
        default:
            break;
    }
    String qualifier = name;
    String theFamily = family != null ? family : familyMap.get(qualifier);
    ByteArrayComparable byteArrayComparable = regexCompRequired
        ? new RegexStringComparator(value.toString().replace("*", "."))
        : new BinaryComparator(value.toString().getBytes(StandardCharsets.UTF_8));
    return new SingleColumnValueFilter(theFamily.getBytes(StandardCharsets.UTF_8),
        qualifier.getBytes(StandardCharsets.UTF_8), compareOp, byteArrayComparable);
}
Also used: RegexStringComparator(org.apache.hadoop.hbase.filter.RegexStringComparator) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) CompareOp(org.apache.hadoop.hbase.filter.CompareFilter.CompareOp) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator)
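
One difference from the Drill builder above: this visitor never calls setFilterIfMissing(true), so rows that lack the column entirely pass the filter, which is SingleColumnValueFilter's default behavior. A hedged sketch of the same kind of filter built directly and tightened; the method name and family/qualifier/value arguments are illustrative, not CXF code:

private Filter buildEqualsFilter(byte[] family, byte[] qualifier, String value) {
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        family, qualifier, CompareOp.EQUAL,
        new BinaryComparator(value.getBytes(StandardCharsets.UTF_8)));
    filter.setFilterIfMissing(true);   // default is false: rows missing the column would pass
    filter.setLatestVersionOnly(true); // compare only the newest cell version
    return filter;
}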

Example 19 with SingleColumnValueFilter

Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by apache.

From the class TestRegionServerReadRequestMetrics, method testReadRequestsCountWithFilter.

@Test
public void testReadRequestsCountWithFilter() throws Exception {
    int resultCount;
    Scan scan;
    // scan filtered by a SingleColumnValueFilter on CF1:COL1
    scan = new Scan();
    scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
    try (ResultScanner scanner = table.getScanner(scan)) {
        resultCount = 0;
        for (Result ignore : scanner) {
            resultCount++;
        }
        testReadRequests(resultCount, 2, 1);
    }
    // scan filtered by a RowFilter matching ROW1
    scan = new Scan();
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
    try (ResultScanner scanner = table.getScanner(scan)) {
        resultCount = 0;
        for (Result ignore : scanner) {
            resultCount++;
        }
        testReadRequests(resultCount, 1, 2);
    }
    // bounded scan [ROW2, ROW3) combined with a RowFilter matching ROW1
    scan = new Scan(ROW2, ROW3);
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
    try (ResultScanner scanner = table.getScanner(scan)) {
        resultCount = 0;
        for (Result ignore : scanner) {
            resultCount++;
        }
        testReadRequests(resultCount, 0, 1);
    }
// fixme filtered get should not increase readRequestsCount
//    Get get = new Get(ROW2);
//    get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
//    Result result = table.get(get);
//    resultCount = result.isEmpty() ? 0 : 1;
//    testReadRequests(resultCount, 0, 1);
}
Also used: SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) Scan(org.apache.hadoop.hbase.client.Scan) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
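
The count-then-verify pattern repeats for each scan above; a small helper would make the intent clearer. A sketch only, not part of the actual test, assuming org.apache.hadoop.hbase.client.Table:

// Sketch: count the rows a Scan returns, closing the scanner afterwards.
private static int countResults(Table table, Scan scan) throws IOException {
    int count = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result ignore : scanner) {
            count++;
        }
    }
    return count;
}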

Example 20 with SingleColumnValueFilter

Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hadoop by apache.

From the class TimelineFilterUtils, method createSingleColValueFiltersByRange.

/**
 * Creates two HBase {@link SingleColumnValueFilter} filters for the value range
 * represented by the start and end values and wraps them in a filter list.
 * The start and end values must not be null.
 *
 * @param <T> Describes the type of column prefix.
 * @param column Column for which the single column value filters are created.
 * @param startValue Start value.
 * @param endValue End value.
 * @return 2 single column value filters wrapped in a filter list.
 * @throws IOException if any problem is encountered while encoding a value.
 */
public static <T> FilterList createSingleColValueFiltersByRange(
        Column<T> column, Object startValue, Object endValue) throws IOException {
    FilterList list = new FilterList();
    Filter singleColValFilterStart = createHBaseSingleColValueFilter(
        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
        column.getValueConverter().encodeValue(startValue),
        CompareOp.GREATER_OR_EQUAL, true);
    list.addFilter(singleColValFilterStart);
    Filter singleColValFilterEnd = createHBaseSingleColValueFilter(
        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
        column.getValueConverter().encodeValue(endValue),
        CompareOp.LESS_OR_EQUAL, true);
    list.addFilter(singleColValFilterEnd);
    return list;
}
Also used: FamilyFilter(org.apache.hadoop.hbase.filter.FamilyFilter) QualifierFilter(org.apache.hadoop.hbase.filter.QualifierFilter) Filter(org.apache.hadoop.hbase.filter.Filter) SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) FilterList(org.apache.hadoop.hbase.filter.FilterList)
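
For readers without the Timeline service helper, an equivalent direct construction follows. It assumes family, qualifier, startBytes, and endBytes are already-encoded byte arrays, and it guesses that the trailing true passed to createHBaseSingleColValueFilter above toggles filterIfMissing; both are assumptions, not verified against that helper:

// Sketch: inclusive range [startBytes, endBytes] on one column. FilterList
// defaults to MUST_PASS_ALL, stated explicitly here, so a row must satisfy
// both bounds to be emitted.
public static FilterList rangeFilter(byte[] family, byte[] qualifier,
        byte[] startBytes, byte[] endBytes) {
    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    SingleColumnValueFilter low = new SingleColumnValueFilter(
        family, qualifier, CompareOp.GREATER_OR_EQUAL, new BinaryComparator(startBytes));
    low.setFilterIfMissing(true); // assumption: drop rows that lack the column entirely
    list.addFilter(low);
    SingleColumnValueFilter high = new SingleColumnValueFilter(
        family, qualifier, CompareOp.LESS_OR_EQUAL, new BinaryComparator(endBytes));
    high.setFilterIfMissing(true);
    list.addFilter(high);
    return list;
}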

Aggregations

SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 31
Scan (org.apache.hadoop.hbase.client.Scan): 12
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 10
Filter (org.apache.hadoop.hbase.filter.Filter): 9
FilterList (org.apache.hadoop.hbase.filter.FilterList): 8
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 7
BitComparator (org.apache.hadoop.hbase.filter.BitComparator): 7
Result (org.apache.hadoop.hbase.client.Result): 6
RowFilter (org.apache.hadoop.hbase.filter.RowFilter): 6
Test (org.junit.Test): 5
ArrayList (java.util.ArrayList): 4
CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp): 4
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 4
Put (org.apache.hadoop.hbase.client.Put): 3
ByteArrayComparable (org.apache.hadoop.hbase.filter.ByteArrayComparable): 3
PrefixFilter (org.apache.hadoop.hbase.filter.PrefixFilter): 3
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 3
SchemaPath (org.apache.drill.common.expression.SchemaPath): 2
Cell (org.apache.hadoop.hbase.Cell): 2
TableName (org.apache.hadoop.hbase.TableName): 2