Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.
The class TestReversibleScanners, method testReversibleRegionScanner.
@Test
public void testReversibleRegionScanner() throws IOException {
    byte[] FAMILYNAME2 = Bytes.toBytes("testCf2");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()))
            .addFamily(new HColumnDescriptor(FAMILYNAME))
            .addFamily(new HColumnDescriptor(FAMILYNAME2));
    Region region = TEST_UTIL.createLocalHRegion(htd, null, null);
    loadDataToRegion(region, FAMILYNAME2);
    // Verify row count with a forward scan
    Scan scan = new Scan();
    InternalScanner scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, true);
    // Case1: Full reversed scan
    scan.setReversed(true);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE * 2, ROWSIZE, false);
    // Case2: Full reversed scan with one family
    scan = new Scan();
    scan.setReversed(true);
    scan.addFamily(FAMILYNAME);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * QUALSIZE, ROWSIZE, false);
    // Case3: Specify qualifiers + one family
    byte[][] specifiedQualifiers = { QUALS[1], QUALS[2] };
    for (byte[] specifiedQualifier : specifiedQualifiers) {
        scan.addColumn(FAMILYNAME, specifiedQualifier);
    }
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2, ROWSIZE, false);
    // Case4: Specify qualifiers + two families
    for (byte[] specifiedQualifier : specifiedQualifiers) {
        scan.addColumn(FAMILYNAME2, specifiedQualifier);
    }
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, ROWSIZE * 2 * 2, ROWSIZE, false);
    // Case5: Case4 + specify start row
    int startRowNum = ROWSIZE * 3 / 4;
    scan.setStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum + 1) * 2 * 2, (startRowNum + 1), false);
    // Case6: Case4 + specify stop row
    int stopRowNum = ROWSIZE / 4;
    scan.setStartRow(HConstants.EMPTY_BYTE_ARRAY);
    scan.setStopRow(ROWS[stopRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (ROWSIZE - stopRowNum - 1) * 2 * 2, (ROWSIZE - stopRowNum - 1), false);
    // Case7: Case4 + specify start row + specify stop row
    scan.setStartRow(ROWS[startRowNum]);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, (startRowNum - stopRowNum) * 2 * 2, (startRowNum - stopRowNum), false);
    // Case8: Case7 + SingleColumnValueFilter
    int valueNum = startRowNum % VALUESIZE;
    Filter filter = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[valueNum]);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int unfilteredRowNum = (startRowNum - stopRowNum) / VALUESIZE + (stopRowNum / VALUESIZE == valueNum ? 0 : 1);
    verifyCountAndOrder(scanner, unfilteredRowNum * 2 * 2, unfilteredRowNum, false);
    // Case9: Case7 + PageFilter
    int pageSize = 10;
    filter = new PageFilter(pageSize);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    int expectedRowNum = pageSize;
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case10: Case7 + FilterList + MUST_PASS_ONE
    SingleColumnValueFilter scvFilter1 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[0]);
    SingleColumnValueFilter scvFilter2 = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], CompareOp.EQUAL, VALUES[1]);
    expectedRowNum = 0;
    for (int i = startRowNum; i > stopRowNum; i--) {
        if (i % VALUESIZE == 0 || i % VALUESIZE == 1) {
            expectedRowNum++;
        }
    }
    filter = new FilterList(Operator.MUST_PASS_ONE, scvFilter1, scvFilter2);
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
    // Case11: Case7 + FilterList + MUST_PASS_ALL
    filter = new FilterList(Operator.MUST_PASS_ALL, scvFilter1, scvFilter2);
    expectedRowNum = 0;
    scan.setFilter(filter);
    scanner = region.getScanner(scan);
    verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false);
}
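Cases 10 and 11 are the FilterList-specific ones: MUST_PASS_ONE ORs its member filters together, while MUST_PASS_ALL ANDs them, which is why the MUST_PASS_ALL case expects zero rows (no single cell can equal both VALUES[0] and VALUES[1]). Below is a minimal standalone sketch of the same construction outside the test harness; the family, qualifier, and value names are placeholders, not part of the HBase test.

// Standalone sketch of the two FilterList operators (placeholder names "cf", "q", "a", "b").
SingleColumnValueFilter eqA = new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"), CompareOp.EQUAL, Bytes.toBytes("a"));
SingleColumnValueFilter eqB = new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"), CompareOp.EQUAL, Bytes.toBytes("b"));
// OR semantics: a row passes if cf:q equals "a" or "b".
Filter anyOf = new FilterList(Operator.MUST_PASS_ONE, eqA, eqB);
// AND semantics: cf:q would have to equal two different values at once, so nothing passes.
Filter allOf = new FilterList(Operator.MUST_PASS_ALL, eqA, eqB);
Scan scan = new Scan();
scan.setReversed(true);
scan.setFilter(anyOf);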
Use of org.apache.hadoop.hbase.filter.FilterList in project titan by thinkaurelius.
The class HBaseKeyColumnValueStore, method getFilter.
public static Filter getFilter(SliceQuery query) {
    byte[] colStartBytes = query.getSliceEnd().length() > 0 ? query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY) : null;
    byte[] colEndBytes = query.getSliceEnd().length() > 0 ? query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY) : null;
    Filter filter = new ColumnRangeFilter(colStartBytes, true, colEndBytes, false);
    if (query.hasLimit()) {
        filter = new FilterList(FilterList.Operator.MUST_PASS_ALL, filter, new ColumnPaginationFilter(query.getLimit(), 0));
    }
    logger.debug("Generated HBase Filter {}", filter);
    return filter;
}
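For context, a hedged sketch of how the returned filter might be attached to a scan; the row key, table handle, and column family below are illustrative assumptions, not part of the Titan source.

// Usage sketch (assumed: an open Table handle, a row key, and a family named "e").
Filter sliceFilter = getFilter(query);
Scan scan = new Scan(rowKeyBytes);
scan.addFamily(Bytes.toBytes("e"));
scan.setFilter(sliceFilter);
try (ResultScanner results = table.getScanner(scan)) {
    for (Result r : results) {
        // Each row carries only columns inside [sliceStart, sliceEnd),
        // capped at query.getLimit() by the ColumnPaginationFilter.
    }
}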
Use of org.apache.hadoop.hbase.filter.FilterList in project pinpoint by naver.
The class HbaseApplicationTraceIndexDao, method makeResponseTimeFilter.
/**
 * Makes the HBase filter that selects values on the y-axis (response time) of the scatter chart,
 * in order to select transactions. To use this filter, the column qualifier must be prefixed
 * with 4 bytes of elapsed time.
 *
 * @param area selected scatter area, which bounds the response-time range
 * @param offsetTransactionId transaction id to resume scanning after, or null for no offset
 * @param offsetTransactionElapsed elapsed time of the offset transaction
 * @return the composed filter list
 */
private Filter makeResponseTimeFilter(final SelectedScatterArea area, final TransactionId offsetTransactionId, int offsetTransactionElapsed) {
    // filter by response time
    ResponseTimeRange responseTimeRange = area.getResponseTimeRange();
    byte[] responseFrom = Bytes.toBytes(responseTimeRange.getFrom());
    byte[] responseTo = Bytes.toBytes(responseTimeRange.getTo());
    FilterList filterList = new FilterList(Operator.MUST_PASS_ALL);
    filterList.addFilter(new QualifierFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(responseFrom)));
    filterList.addFilter(new QualifierFilter(CompareOp.LESS_OR_EQUAL, new BinaryPrefixComparator(responseTo)));
    // add offset
    if (offsetTransactionId != null) {
        final Buffer buffer = new AutomaticBuffer(32);
        buffer.putInt(offsetTransactionElapsed);
        buffer.putPrefixedString(offsetTransactionId.getAgentId());
        buffer.putSVLong(offsetTransactionId.getAgentStartTime());
        buffer.putVLong(offsetTransactionId.getTransactionSequence());
        byte[] qualifierOffset = buffer.getBuffer();
        filterList.addFilter(new QualifierFilter(CompareOp.GREATER, new BinaryPrefixComparator(qualifierOffset)));
    }
    return filterList;
}
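As a hedged illustration of the qualifier layout the Javadoc above describes (the helper below is hypothetical, not Pinpoint's writer code): the first 4 bytes of each qualifier encode the elapsed time, which is what allows the BinaryPrefixComparator checks to bound the scan by response time.

// Hypothetical writer-side sketch: prefix the qualifier with a big-endian
// 4-byte elapsed time so BinaryPrefixComparator can range-match on it.
byte[] buildQualifier(int elapsedTime, byte[] transactionPart) {
    // HBase's Bytes.toBytes(int) is big-endian, so for non-negative elapsed
    // times the lexicographic order of prefixes matches their numeric order.
    return Bytes.add(Bytes.toBytes(elapsedTime), transactionPart);
}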
Use of org.apache.hadoop.hbase.filter.FilterList in project camel by apache.
The class HBaseProducer, method scanCells.
/**
 * Performs an HBase {@link Scan} over a range of rows, using a collection of values (family/column/value pairs).
 * The result contains the most recent entry for each column.
 */
private List<HBaseRow> scanCells(Table table, HBaseRow model, String start, String stop, Integer maxRowScan, List<Filter> filters) throws Exception {
    List<HBaseRow> rowSet = new LinkedList<>();
    HBaseRow startRow = new HBaseRow(model.getCells());
    startRow.setId(start);
    Scan scan;
    if (start != null) {
        scan = new Scan(Bytes.toBytes(start));
    } else {
        scan = new Scan();
    }
    if (ObjectHelper.isNotEmpty(stop)) {
        scan.setStopRow(Bytes.toBytes(stop));
    }
    if (filters != null && !filters.isEmpty()) {
        for (int i = 0; i < filters.size(); i++) {
            ((ModelAwareFilter<?>) filters.get(i)).apply(endpoint.getCamelContext(), model);
            scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, ((ModelAwareFilter<?>) filters.get(i)).getFilteredList()));
        }
    }
    Set<HBaseCell> cellModels = model.getCells();
    for (HBaseCell cellModel : cellModels) {
        String family = cellModel.getFamily();
        String column = cellModel.getQualifier();
        if (ObjectHelper.isNotEmpty(family) && ObjectHelper.isNotEmpty(column)) {
            scan.addColumn(HBaseHelper.getHBaseFieldAsBytes(family), HBaseHelper.getHBaseFieldAsBytes(column));
        }
    }
    ResultScanner resultScanner = table.getScanner(scan);
    int count = 0;
    Result result = resultScanner.next();
    while (result != null && count < maxRowScan) {
        HBaseRow resultRow = new HBaseRow();
        resultRow.setId(endpoint.getCamelContext().getTypeConverter().convertTo(model.getRowType(), result.getRow()));
        resultRow.setTimestamp(result.rawCells()[0].getTimestamp());
        cellModels = model.getCells();
        for (HBaseCell modelCell : cellModels) {
            HBaseCell resultCell = new HBaseCell();
            String family = modelCell.getFamily();
            String column = modelCell.getQualifier();
            resultRow.setId(endpoint.getCamelContext().getTypeConverter().convertTo(model.getRowType(), result.getRow()));
            resultCell.setValue(endpoint.getCamelContext().getTypeConverter().convertTo(modelCell.getValueType(), result.getValue(HBaseHelper.getHBaseFieldAsBytes(family), HBaseHelper.getHBaseFieldAsBytes(column))));
            resultCell.setFamily(modelCell.getFamily());
            resultCell.setQualifier(modelCell.getQualifier());
            Cell cell = result.getColumnLatestCell(HBaseHelper.getHBaseFieldAsBytes(family), HBaseHelper.getHBaseFieldAsBytes(column));
            if (cell != null) {
                resultCell.setTimestamp(cell.getTimestamp());
            }
            resultRow.getCells().add(resultCell);
        }
        rowSet.add(resultRow);
        count++;
        result = resultScanner.next();
    }
    return rowSet;
}
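Note that Scan.setFilter(...) replaces any previously set filter, so in the loop above only the FilterList built from the last model-aware filter actually takes effect. If the intent were to AND all of them together, a hedged sketch under the same ModelAwareFilter API shown above would be:

// Sketch: fold every model-aware filter into one MUST_PASS_ALL list and set it once.
FilterList combined = new FilterList(FilterList.Operator.MUST_PASS_ALL);
for (Filter f : filters) {
    ((ModelAwareFilter<?>) f).apply(endpoint.getCamelContext(), model);
    combined.addFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, ((ModelAwareFilter<?>) f).getFilteredList()));
}
scan.setFilter(combined);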
Use of org.apache.hadoop.hbase.filter.FilterList in project incubator-atlas by apache.
The class HBaseKeyColumnValueStore, method getFilter (identical to the Titan version above).
public static Filter getFilter(SliceQuery query) {
    byte[] colStartBytes = query.getSliceEnd().length() > 0 ? query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY) : null;
    byte[] colEndBytes = query.getSliceEnd().length() > 0 ? query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY) : null;
    Filter filter = new ColumnRangeFilter(colStartBytes, true, colEndBytes, false);
    if (query.hasLimit()) {
        filter = new FilterList(FilterList.Operator.MUST_PASS_ALL, filter, new ColumnPaginationFilter(query.getLimit(), 0));
    }
    logger.debug("Generated HBase Filter {}", filter);
    return filter;
}