Use of org.apache.hadoop.hbase.filter.RowFilter in the Apache Drill project:
class MapRDBFilterBuilder, method parseTree.
/**
 * Walks the pushed-down filter expression and merges the resulting scan
 * specification with the group scan's existing one.
 *
 * @return the merged {@link HBaseScanSpec}, or {@code null} if the
 *         expression could not be converted to a scan spec
 */
public HBaseScanSpec parseTree() {
  HBaseScanSpec result = le.accept(this, null);
  if (result == null) {
    return null;
  }
  result = mergeScanSpecs(FunctionNames.AND, this.groupScan.getHBaseScanSpec(), result);
  /*
   * A RowFilter over a BinaryComparator (for any operator other than
   * NOT_EQUAL) is fully expressed by the startRow/stopRow bounds, so when
   * such a filter is the one attached to the merged spec it is redundant
   * and can be dropped.
   */
  Filter mergedFilter = result.getFilter();
  boolean redundantRowFilter = mergedFilter instanceof RowFilter
      && ((RowFilter) mergedFilter).getOperator() != CompareOp.NOT_EQUAL
      && ((RowFilter) mergedFilter).getComparator() instanceof BinaryComparator;
  if (redundantRowFilter) {
    result = new HBaseScanSpec(result.getTableName(), result.getStartRow(),
        result.getStopRow(), null);
  }
  return result;
}
Use of org.apache.hadoop.hbase.filter.RowFilter in the Apache Drill project:
class HBaseFilterBuilder, method createHBaseScanSpec.
/**
 * Converts a single comparison function call into an {@link HBaseScanSpec},
 * producing a row-key range (startRow/stopRow) and/or an HBase {@link Filter}.
 *
 * @param call      the original function call expression (used for LIKE parsing)
 * @param processor pre-processed comparison: function name, field path, sort
 *                  order, and the encoded literal value
 * @return a scan spec for this predicate, or {@code null} if the predicate
 *         cannot be pushed down (the field is neither the row key nor a
 *         family.qualifier column, or it is a null-test on the row key)
 */
private HBaseScanSpec createHBaseScanSpec(FunctionCall call, CompareFunctionsProcessor processor) {
  String functionName = processor.getFunctionName();
  SchemaPath field = processor.getPath();
  byte[] fieldValue = processor.getValue();
  boolean sortOrderAscending = processor.isSortOrderAscending();
  boolean isRowKey = field.getRootSegmentPath().equals(ROW_KEY);
  if (!(isRowKey
      || (!field.getRootSegment().isLastPath()
          && field.getRootSegment().getChild().isLastPath()
          && field.getRootSegment().getChild().isNamed()))) {
    /*
     * If the field in this function is neither the row_key nor a qualified
     * HBase column (family.qualifier), the predicate cannot be pushed down.
     */
    return null;
  }
  if (processor.isRowKeyPrefixComparison()) {
    return createRowKeyPrefixScanSpec(call, processor);
  }
  CompareOp compareOp = null;
  boolean isNullTest = false;
  ByteArrayComparable comparator = new BinaryComparator(fieldValue);
  byte[] startRow = HConstants.EMPTY_START_ROW;
  byte[] stopRow = HConstants.EMPTY_END_ROW;
  switch(functionName) {
    // NOTE(review): literal "equal" while every other case uses a
    // FunctionNames constant — presumably FunctionNames.EQ; confirm and unify.
    case "equal":
      compareOp = CompareOp.EQUAL;
      if (isRowKey) {
        startRow = fieldValue;
        // stopRow should be just greater than 'value'
        stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
      }
      break;
    case FunctionNames.NE:
      compareOp = CompareOp.NOT_EQUAL;
      break;
    case FunctionNames.GE:
      if (sortOrderAscending) {
        compareOp = CompareOp.GREATER_OR_EQUAL;
        if (isRowKey) {
          startRow = fieldValue;
        }
      } else {
        // Descending key encoding inverts the comparison.
        compareOp = CompareOp.LESS_OR_EQUAL;
        if (isRowKey) {
          // stopRow should be just greater than 'value'
          stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
        }
      }
      break;
    case FunctionNames.GT:
      if (sortOrderAscending) {
        compareOp = CompareOp.GREATER;
        if (isRowKey) {
          // startRow should be just greater than 'value'
          startRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
        }
      } else {
        compareOp = CompareOp.LESS;
        if (isRowKey) {
          stopRow = fieldValue;
        }
      }
      break;
    case FunctionNames.LE:
      if (sortOrderAscending) {
        compareOp = CompareOp.LESS_OR_EQUAL;
        if (isRowKey) {
          // stopRow should be just greater than 'value'
          stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
        }
      } else {
        compareOp = CompareOp.GREATER_OR_EQUAL;
        if (isRowKey) {
          startRow = fieldValue;
        }
      }
      break;
    case FunctionNames.LT:
      if (sortOrderAscending) {
        compareOp = CompareOp.LESS;
        if (isRowKey) {
          stopRow = fieldValue;
        }
      } else {
        compareOp = CompareOp.GREATER;
        if (isRowKey) {
          // startRow should be just greater than 'value'
          startRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
        }
      }
      break;
    case FunctionNames.IS_NULL:
    case "isNull":
    case "is null":
      if (isRowKey) {
        // The row key can never be null; nothing to push down.
        return null;
      }
      isNullTest = true;
      compareOp = CompareOp.EQUAL;
      comparator = new NullComparator();
      break;
    case FunctionNames.IS_NOT_NULL:
    case "isNotNull":
    case "is not null":
      if (isRowKey) {
        return null;
      }
      compareOp = CompareOp.NOT_EQUAL;
      comparator = new NullComparator();
      break;
    case FunctionNames.LIKE:
      /*
       * Convert the LIKE operand to a regular-expression pattern so that we
       * can apply RegexStringComparator().
       */
      HBaseRegexParser parser = new HBaseRegexParser(call).parse();
      compareOp = CompareOp.EQUAL;
      comparator = new RegexStringComparator(parser.getRegexString());
      /*
       * We can possibly do better if the LIKE operator is on the row_key.
       */
      if (isRowKey) {
        String prefix = parser.getPrefixString();
        if (prefix != null) {
          /*
           * If there is a literal prefix, it can help us prune the scan to a
           * sub range.
           */
          if (prefix.equals(parser.getLikeString())) {
            // The operand value is a pure literal; LIKE degenerates to an
            // exact row-key match, so the regex filter is unnecessary.
            startRow = stopRow = fieldValue;
            compareOp = null;
          } else {
            // Scan [prefix, successor-of-prefix): increment the prefix's
            // last byte, propagating carries toward the front.
            startRow = prefix.getBytes(Charsets.UTF_8);
            stopRow = startRow.clone();
            boolean isMaxVal = true;
            for (int i = stopRow.length - 1; i >= 0; --i) {
              int nextByteValue = (0xff & stopRow[i]) + 1;
              // NOTE(review): carries when the incremented byte reaches 0xff
              // (i.e. original byte 0xfe), producing a stop row larger than
              // the strict successor. Conservative — extra rows are still
              // rejected by the regex filter — but verify this is intended.
              if (nextByteValue < 0xff) {
                stopRow[i] = (byte) nextByteValue;
                isMaxVal = false;
                break;
              } else {
                stopRow[i] = 0;
              }
            }
            if (isMaxVal) {
              // Prefix was all high bytes; no finite upper bound exists.
              stopRow = HConstants.EMPTY_END_ROW;
            }
          }
        }
      }
      break;
  }
  // Reference comparison is intentional: startRow/stopRow are either the
  // EMPTY_* sentinels or arrays assigned above.
  if (compareOp != null || startRow != HConstants.EMPTY_START_ROW || stopRow != HConstants.EMPTY_END_ROW) {
    Filter filter = null;
    if (isRowKey) {
      if (compareOp != null) {
        filter = new RowFilter(compareOp, comparator);
      }
    } else {
      byte[] family = HBaseUtils.getBytes(field.getRootSegment().getPath());
      byte[] qualifier = HBaseUtils.getBytes(field.getRootSegment().getChild().getNameSegment().getPath());
      filter = new SingleColumnValueFilter(family, qualifier, compareOp, comparator);
      ((SingleColumnValueFilter) filter).setLatestVersionOnly(true);
      if (!isNullTest) {
        // For value comparisons, a missing column must not match; for null
        // tests, missing IS the match, so leave filterIfMissing false.
        ((SingleColumnValueFilter) filter).setFilterIfMissing(true);
      }
    }
    return new HBaseScanSpec(groupScan.getTableName(), startRow, stopRow, filter);
  }
  // Nothing could be pushed down for this predicate.
  return null;
}
Use of org.apache.hadoop.hbase.filter.RowFilter in the Apache HBase project:
class TestFromClientSide5, method testJira6912.
/**
 * Regression test for HBASE-6912: scanning a flushed table with a
 * NOT_EQUAL RowFilter must still honor the start/stop row bounds.
 */
@Test
public void testJira6912() throws Exception {
  final TableName tableName = name.getTableName();
  try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10)) {
    List<Put> puts = new ArrayList<>();
    for (int row = 0; row < 100; row++) {
      Put put = new Put(Bytes.toBytes(row));
      put.addColumn(FAMILY, FAMILY, Bytes.toBytes(row));
      puts.add(put);
    }
    table.put(puts);
    // Flushing to disk is what originally triggered the bug.
    TEST_UTIL.flush();
    Scan scan = new Scan();
    scan.withStartRow(Bytes.toBytes(1));
    scan.withStopRow(Bytes.toBytes(3));
    scan.addColumn(FAMILY, FAMILY);
    scan.setFilter(new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1))));
    try (ResultScanner scanner = table.getScanner(scan)) {
      // Rows 1 and 2 are in range; the filter drops row 1, leaving row 2.
      Result[] results = scanner.next(100);
      assertEquals(1, results.length);
    }
  }
}
Use of org.apache.hadoop.hbase.filter.RowFilter in the Apache HBase project:
class TestSerialization, method testCompareFilter.
/**
 * Verifies that a RowFilter survives a serialize/deserialize round trip.
 */
@Test
public void testCompareFilter() throws Exception {
  Filter original =
      new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2")));
  byte[] serialized = original.toByteArray();
  Filter deserialized = RowFilter.parseFrom(serialized);
  assertNotNull(deserialized);
}
Use of org.apache.hadoop.hbase.filter.RowFilter in the Apache HBase project:
class TestServerSideScanMetricsFromClientSide, method testRowsFilteredMetric.
/**
 * Exercises the rows-filtered scan metric against a variety of filters,
 * checking the expected count of entirely-filtered rows for each.
 *
 * @param baseScan scan configuration the filters are applied on top of
 */
private void testRowsFilteredMetric(Scan baseScan) throws Exception {
  // No filter: nothing is filtered.
  testRowsFilteredMetric(baseScan, null, 0);

  // A row filter matching no row key filters every row.
  testRowsFilteredMetric(baseScan,
      new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("xyz"))),
      ROWS.length);

  // FirstKeyOnlyFilter still returns a result per row, so zero rows filtered.
  testRowsFilteredMetric(baseScan, new FirstKeyOnlyFilter(), 0);

  // A column prefix present on every row filters nothing.
  testRowsFilteredMetric(baseScan, new ColumnPrefixFilter(QUALIFIERS[0]), 0);

  // A column prefix matching no qualifier anywhere filters every row.
  testRowsFilteredMetric(baseScan, new ColumnPrefixFilter(Bytes.toBytes("xyz")), ROWS.length);

  // The matching column value exists in every row: nothing filtered.
  testRowsFilteredMetric(baseScan,
      new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOperator.EQUAL, VALUE), 0);

  // Under NOT_EQUAL no row's value passes: every row filtered.
  testRowsFilteredMetric(baseScan,
      new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOperator.NOT_EQUAL, VALUE),
      ROWS.length);

  // MUST_PASS_ONE list of exact row-key filters: only the listed rows pass.
  List<Filter> rowKeyFilters = new ArrayList<>();
  rowKeyFilters.add(new RowFilter(CompareOperator.EQUAL, new BinaryComparator(ROWS[0])));
  rowKeyFilters.add(new RowFilter(CompareOperator.EQUAL, new BinaryComparator(ROWS[3])));
  int numberOfMatchingRowFilters = rowKeyFilters.size();
  testRowsFilteredMetric(baseScan, new FilterList(Operator.MUST_PASS_ONE, rowKeyFilters),
      ROWS.length - numberOfMatchingRowFilters);

  // NOTE(review): the original comment here was truncated mid-sentence;
  // presumably these exclude filters produce an empty Cell array in
  // RegionScanner#nextInternal, which is counted as a filtered row —
  // confirm against that method.
  List<Filter> excludeFilters = new ArrayList<>();
  for (int family = 0; family < FAMILIES.length; family++) {
    for (int qualifier = 0; qualifier < QUALIFIERS.length; qualifier++) {
      excludeFilters.add(new SingleColumnValueExcludeFilter(
          FAMILIES[family], QUALIFIERS[qualifier], CompareOperator.EQUAL, VALUE));
    }
  }
  testRowsFilteredMetric(baseScan, new FilterList(Operator.MUST_PASS_ONE, excludeFilters),
      ROWS.length);
}
Aggregations