use of org.apache.hadoop.hbase.filter.ColumnPrefixFilter in project hbase by apache.
the class TestServerSideScanMetricsFromClientSide method testRowsFilteredMetric.
public void testRowsFilteredMetric(Scan baseScan) throws Exception {
  testRowsFilteredMetric(baseScan, null, 0);

  // Row filter doesn't match any row key. All rows should be filtered
  Filter filter = new RowFilter(CompareOp.EQUAL, new BinaryComparator("xyz".getBytes()));
  testRowsFilteredMetric(baseScan, filter, ROWS.length);

  // Filter will return results containing only the first key. Number of entire rows filtered
  // should be 0.
  filter = new FirstKeyOnlyFilter();
  testRowsFilteredMetric(baseScan, filter, 0);

  // Column prefix will find some matching qualifier on each row. Number of entire rows filtered
  // should be 0
  filter = new ColumnPrefixFilter(QUALIFIERS[0]);
  testRowsFilteredMetric(baseScan, filter, 0);

  // Column prefix will NOT find any matching qualifier on any row. All rows should be filtered
  filter = new ColumnPrefixFilter("xyz".getBytes());
  testRowsFilteredMetric(baseScan, filter, ROWS.length);

  // Matching column value should exist in each row. No rows should be filtered.
  filter = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOp.EQUAL, VALUE);
  testRowsFilteredMetric(baseScan, filter, 0);

  // No matching column value should exist in any row. Filter all rows
  filter = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOp.NOT_EQUAL, VALUE);
  testRowsFilteredMetric(baseScan, filter, ROWS.length);

  List<Filter> filters = new ArrayList<>();
  filters.add(new RowFilter(CompareOp.EQUAL, new BinaryComparator(ROWS[0])));
  filters.add(new RowFilter(CompareOp.EQUAL, new BinaryComparator(ROWS[3])));
  int numberOfMatchingRowFilters = filters.size();
  filter = new FilterList(Operator.MUST_PASS_ONE, filters);
  testRowsFilteredMetric(baseScan, filter, ROWS.length - numberOfMatchingRowFilters);
  filters.clear();

  // Add a SingleColumnValueExcludeFilter for every column. The net effect is that every cell is
  // excluded from the results, so each row surfaces as an empty cell
  // array in RegionScanner#nextInternal which should be interpreted as a row being filtered.
  for (int family = 0; family < FAMILIES.length; family++) {
    for (int qualifier = 0; qualifier < QUALIFIERS.length; qualifier++) {
      filters.add(new SingleColumnValueExcludeFilter(FAMILIES[family], QUALIFIERS[qualifier],
          CompareOp.EQUAL, VALUE));
    }
  }
  filter = new FilterList(Operator.MUST_PASS_ONE, filters);
  testRowsFilteredMetric(baseScan, filter, ROWS.length);
}
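The three-argument testRowsFilteredMetric helper that performs the actual assertions is not shown in this snippet. As a rough, hypothetical sketch of the mechanism it relies on (assuming the HBase 1.x client API used above; the countRowsFiltered helper and its Table parameter are illustrative, not part of the test):

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;

// Hypothetical helper: run a filtered scan and return the server-side
// rows-filtered counter that the assertions above are built on.
static long countRowsFiltered(Table table, Filter filter) throws Exception {
  Scan scan = new Scan();
  scan.setScanMetricsEnabled(true); // collect per-scan metrics
  scan.setFilter(filter);
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      // drain the scanner; the metrics are only complete once the scan finishes
    }
  }
  ScanMetrics metrics = scan.getScanMetrics();
  return metrics == null ? 0L : metrics.countOfRowsFiltered.get();
}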
use of org.apache.hadoop.hbase.filter.ColumnPrefixFilter in project hbase by apache.
the class TestPartialResultsFromClientSide method testPartialResultsWithColumnFilter.
/**
* Test partial Result re-assembly in the presence of different filters. The Results from the
* partial scanner should match the Results returned from a scanner that receives all of the
* results in one RPC to the server. The partial scanner is tested with a variety of different
* result sizes (all of which are less than the size necessary to fetch an entire row).
* @throws Exception
*/
@Test
public void testPartialResultsWithColumnFilter() throws Exception {
  testPartialResultsWithColumnFilter(new FirstKeyOnlyFilter());
  testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5")));
  testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true,
      Bytes.toBytes("testQualifier7"), true));

  Set<byte[]> qualifiers = new LinkedHashSet<>();
  qualifiers.add(Bytes.toBytes("testQualifier5"));
  testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
}
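The testPartialResultsWithColumnFilter(Filter) helper is likewise not shown. A minimal sketch of the client-side re-assembly it verifies, assuming the HBase 1.x partial-results API (setAllowPartialResults, Result.isPartial, Result.createCompleteResult); the scanOneRowInPieces helper is an assumption for illustration:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;

// Hypothetical sketch: force partial Results with a tiny max result size and
// stitch the pieces of the first row back together on the client.
static Result scanOneRowInPieces(Table table, Filter filter) throws Exception {
  Scan scan = new Scan();
  scan.setFilter(filter);
  scan.setMaxResultSize(1); // bytes; forces the server to chunk each row
  scan.setAllowPartialResults(true);
  List<Result> partials = new ArrayList<>();
  try (ResultScanner scanner = table.getScanner(scan)) {
    Result piece;
    while ((piece = scanner.next()) != null) {
      partials.add(piece);
      if (!piece.isPartial()) {
        break; // a non-partial Result closes out the current row
      }
    }
  }
  // Re-assemble the pieces into one logical row, as the test compares against
  return Result.createCompleteResult(partials);
}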
use of org.apache.hadoop.hbase.filter.ColumnPrefixFilter in project hbase by apache.
the class TestScannersFromClientSide method testGetMaxResults.
/**
* Test from client side for get with maxResultPerCF set
*
* @throws Exception
*/
@Test
public void testGetMaxResults() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3);
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 20);
  Table ht = TEST_UTIL.createTable(tableName, FAMILIES);

  Get get;
  Put put;
  Result result;
  boolean toLog = true;
  List<Cell> kvListExp;

  kvListExp = new ArrayList<>();
  // Insert one CF for row[0]
  put = new Put(ROW);
  for (int i = 0; i < 10; i++) {
    KeyValue kv = new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE);
    put.add(kv);
    kvListExp.add(kv);
  }
  ht.put(put);

  get = new Get(ROW);
  result = ht.get(get);
  verifyResult(result, kvListExp, toLog, "Testing without setting maxResults");

  get = new Get(ROW);
  get.setMaxResultsPerColumnFamily(2);
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[0], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[1], 1, VALUE));
  verifyResult(result, kvListExp, toLog, "Testing basic setMaxResults");

  // Filters: ColumnRangeFilter
  get = new Get(ROW);
  get.setMaxResultsPerColumnFamily(5);
  get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], true));
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[2], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[3], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[4], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[5], 1, VALUE));
  verifyResult(result, kvListExp, toLog, "Testing single CF with CRF");

  // Insert two more CFs for row[0]:
  // 20 columns for CF2, 10 columns for CF1
  put = new Put(ROW);
  for (int i = 0; i < QUALIFIERS.length; i++) {
    KeyValue kv = new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE);
    put.add(kv);
  }
  ht.put(put);

  put = new Put(ROW);
  for (int i = 0; i < 10; i++) {
    KeyValue kv = new KeyValue(ROW, FAMILIES[1], QUALIFIERS[i], 1, VALUE);
    put.add(kv);
  }
  ht.put(put);

  get = new Get(ROW);
  get.setMaxResultsPerColumnFamily(12);
  get.addFamily(FAMILIES[1]);
  get.addFamily(FAMILIES[2]);
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  // Exp: CF1: q0, ..., q9; CF2: q0, q1, q10, q11, ..., q19
  for (int i = 0; i < 10; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[i], 1, VALUE));
  }
  for (int i = 0; i < 2; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE));
  }
  for (int i = 10; i < 20; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE));
  }
  verifyResult(result, kvListExp, toLog, "Testing multiple CFs");

  // Filters: ColumnRangeFilter and ColumnPrefixFilter
  get = new Get(ROW);
  get.setMaxResultsPerColumnFamily(3);
  get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, null, true));
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  for (int i = 2; i < 5; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE));
  }
  for (int i = 2; i < 5; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[i], 1, VALUE));
  }
  for (int i = 2; i < 5; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE));
  }
  verifyResult(result, kvListExp, toLog, "Testing multiple CFs + CRF");

  get = new Get(ROW);
  get.setMaxResultsPerColumnFamily(7);
  get.setFilter(new ColumnPrefixFilter(QUALIFIERS[1]));
  result = ht.get(get);
  kvListExp = new ArrayList<>();
  kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[1], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[1], 1, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[1], 1, VALUE));
  for (int i = 10; i < 16; i++) {
    kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE));
  }
  verifyResult(result, kvListExp, toLog, "Testing multiple CFs + PFF");
}
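The last case shows the evaluation order this test depends on: the ColumnPrefixFilter first narrows each family to the qualifiers starting with "testQualifier1" (q1, and q10..q19 where they exist), and only then does setMaxResultsPerColumnFamily(7) cap each family's surviving columns, trimming CF2 to q1 plus q10..q15. A condensed, hypothetical version of just that interaction ('table' and the "row-1" row key are assumptions):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical condensed version of the final case above.
static Result capPrefixedColumns(Table table) throws IOException {
  Get get = new Get(Bytes.toBytes("row-1")); // assumed row key
  get.setFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier1"))); // filter runs first
  get.setMaxResultsPerColumnFamily(7); // then the per-family cap is applied
  return table.get(get); // with the data above: q1 plus q10..q15 from CF2
}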
use of org.apache.hadoop.hbase.filter.ColumnPrefixFilter in project cdap by caskdata.
the class HBaseStreamConsumerStateStore method fetchAll.
@Override
protected void fetchAll(byte[] row, byte[] columnPrefix, Map<byte[], byte[]> result) throws IOException {
  Get get = new Get(row);
  get.addFamily(QueueEntryRow.COLUMN_FAMILY);
  get.setMaxVersions(1);
  if (columnPrefix != null) {
    get.setFilter(new ColumnPrefixFilter(columnPrefix));
  }
  Result hTableResult = hTable.get(get);
  if (hTableResult.isEmpty()) {
    return;
  }
  result.putAll(hTableResult.getFamilyMap(QueueEntryRow.COLUMN_FAMILY));
}
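A generic variant of this pattern, decoupled from CDAP's QueueEntryRow constant, might look like the following sketch; the fetchAllWithPrefix name and the explicit family parameter are illustrative assumptions:

import java.io.IOException;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical generic variant: fetch the latest version of every column in one
// family whose qualifier starts with columnPrefix.
static NavigableMap<byte[], byte[]> fetchAllWithPrefix(Table table, byte[] row,
    byte[] family, byte[] columnPrefix) throws IOException {
  Get get = new Get(row);
  get.addFamily(family);
  get.setMaxVersions(1); // latest version of each column only
  if (columnPrefix != null) {
    // Server-side filter: only qualifiers starting with columnPrefix survive
    get.setFilter(new ColumnPrefixFilter(columnPrefix));
  }
  Result result = table.get(get);
  if (result.isEmpty()) {
    // getFamilyMap returns null for an empty Result, so guard like the CDAP code does
    return new TreeMap<>(Bytes.BYTES_COMPARATOR);
  }
  return result.getFamilyMap(family);
}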