Use of org.apache.hadoop.hbase.filter.BinaryComparator in project phoenix by apache:
class FirstLastValueServerAggregator, method aggregate.
/**
 * Folds one input tuple into the running FIRST/LAST VALUE state.
 *
 * <p>The ORDER BY column is evaluated first to obtain the ordering key,
 * then the data column is evaluated into {@code ptr}. When an offset is in
 * use, up to {@code offset} values are kept in the {@code topValues} map;
 * otherwise only the single best value ({@code topValue}/{@code topOrder})
 * is tracked.
 *
 * @param tuple input row to aggregate
 * @param ptr   scratch pointer; on entry unused, used to evaluate columns
 */
@Override
public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) {
    // Evaluate the ORDER BY column to obtain the ordering key. Bail out if
    // it cannot be evaluated, mirroring the dataColumn check below
    // (previously the boolean result was silently ignored).
    if (!orderByColumn.evaluate(tuple, ptr)) {
        return;
    }
    byte[] currentOrder = ptr.copyBytes();
    if (!dataColumn.evaluate(tuple, ptr)) {
        return;
    }
    if (useOffset) {
        boolean addFlag = false;
        if (topValuesCount < offset) {
            // Still filling the initial window of <offset> values.
            // NOTE: the previous try/catch around this assignment was dead
            // code (a plain assignment cannot throw) and has been removed.
            addFlag = true;
        } else {
            // Window is full: evict the worst element if the incoming key
            // beats it. For ascending order the worst is the largest key;
            // for descending order it is the smallest.
            if (isAscending) {
                if (removeLastElement(currentOrder, topValues.lastKey(), -1)) {
                    addFlag = true;
                    topValuesCount--;
                }
            } else {
                if (removeLastElement(currentOrder, topValues.firstKey(), 1)) {
                    addFlag = true;
                    topValuesCount--;
                }
            }
        }
        if (addFlag) {
            topValuesCount++;
            if (!topValues.containsKey(currentOrder)) {
                topValues.put(currentOrder, new LinkedList<byte[]>());
            }
            // Invert bytes if a descending SortOrder is set on the value
            // column, so stored bytes compare in natural order.
            if (hasValueDescSortOrder) {
                topValues.get(currentOrder).push(SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength()));
            } else {
                topValues.get(currentOrder).push(ptr.copyBytes());
            }
        }
    } else {
        // No offset: track only the single best (first/last) value.
        boolean isHigher;
        if (isAscending) {
            isHigher = topOrder.compareTo(currentOrder) > 0;
        } else {
            // desc
            isHigher = topOrder.compareTo(currentOrder) < 0;
        }
        // An empty topOrder means no value has been seen yet.
        if (topOrder.getValue().length < 1 || isHigher) {
            if (hasValueDescSortOrder) {
                topValue = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                topValue = ptr.copyBytes();
            }
            topOrder = new BinaryComparator(currentOrder);
        }
    }
}
Use of org.apache.hadoop.hbase.filter.BinaryComparator in project phoenix by apache:
class ScannerBuilder, method getColumnFilters.
/**
 * Builds a filter matching any of the given column references.
 *
 * @param columns columns to filter
 * @return filter that will skip any {@link KeyValue} that doesn't match one of the passed columns
 *         (family match, plus qualifier match when the reference names a specific qualifier)
 */
private Filter getColumnFilters(Collection<? extends ColumnReference> columns) {
    // Columns are alternatives, so combine them with OR semantics.
    FilterList orFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    for (ColumnReference ref : columns) {
        Filter familyMatch = new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(ref.getFamily()));
        boolean qualifierIsSpecific = !Bytes.equals(ColumnReference.ALL_QUALIFIERS, ref.getQualifier());
        if (qualifierIsSpecific) {
            // Require both the family and the exact qualifier to match.
            Filter qualifierMatch = new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(ref.getQualifier()));
            orFilters.addFilter(new FilterList(familyMatch, qualifierMatch));
        } else {
            orFilters.addFilter(familyMatch);
        }
    }
    return orFilters;
}
Use of org.apache.hadoop.hbase.filter.BinaryComparator in project hadoop by apache:
class FlowRunEntityReader, method constructFilterListBasedOnFields.
/**
 * Builds the column filter list for a flow-run read based on the fields
 * the caller asked for. Returns a MUST_PASS_ONE list: either the whole
 * INFO family minus metric columns, or INFO plus an explicit set of
 * metric columns when metricsToRetrieve is specified.
 */
@Override
protected FilterList constructFilterListBasedOnFields() throws IOException {
// Top-level list uses OR semantics: a cell passes if any sub-list accepts it.
FilterList list = new FilterList(Operator.MUST_PASS_ONE);
// By default fetch everything in INFO column family.
FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(FlowRunColumnFamily.INFO.getBytes()));
TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
// Metrics are always returned if we are reading a single entity.
if (!isSingleEntityRead() && !hasField(dataToRetrieve.getFieldsToRetrieve(), Field.METRICS)) {
// Multi-entity read without METRICS requested: accept INFO-family cells
// whose qualifier does NOT start with the metric column prefix.
// NOTE(review): Operator.MUST_PASS_ONE here looks intended to be AND
// (family match AND non-metric qualifier) — confirm against upstream.
FilterList infoColFamilyList = new FilterList(Operator.MUST_PASS_ONE);
infoColFamilyList.addFilter(infoColumnFamily);
infoColFamilyList.addFilter(new QualifierFilter(CompareOp.NOT_EQUAL, new BinaryPrefixComparator(FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(""))));
list.addFilter(infoColFamilyList);
} else {
// Check if metricsToRetrieve are specified and if they are, create a
// filter list for info column family by adding flow run tables columns
// and a list for metrics to retrieve. Pls note that fieldsToRetrieve
// will have METRICS added to it if metricsToRetrieve are specified
// (in augmentParams()).
TimelineFilterList metricsToRetrieve = dataToRetrieve.getMetricsToRetrieve();
if (metricsToRetrieve != null && !metricsToRetrieve.getFilterList().isEmpty()) {
// Default FilterList operator is MUST_PASS_ALL: family must match AND
// the qualifier must be either a fixed column or a requested metric.
FilterList infoColFamilyList = new FilterList();
infoColFamilyList.addFilter(infoColumnFamily);
FilterList columnsList = updateFixedColumns();
columnsList.addFilter(TimelineFilterUtils.createHBaseFilterList(FlowRunColumnPrefix.METRIC, metricsToRetrieve));
infoColFamilyList.addFilter(columnsList);
list.addFilter(infoColFamilyList);
}
// Otherwise no sub-filter is added and the (empty) MUST_PASS_ONE list
// is returned — everything in the row is fetched.
}
return list;
}
Use of org.apache.hadoop.hbase.filter.BinaryComparator in project hadoop by apache:
class GenericEntityReader, method constructFilterListBasedOnFields.
/**
 * Builds the column filter list for an entity read based on the fields to
 * retrieve, or returns {@code null} when no filtering is necessary (fetch
 * every column).
 */
@Override
protected FilterList constructFilterListBasedOnFields() throws IOException {
    if (!needCreateFilterListBasedOnFields()) {
        // Fetch all the columns. No need of a filter.
        return null;
    }
    // Top-level OR list; sub-lists added below each describe one acceptable
    // family/column combination.
    FilterList fieldFilters = new FilterList(Operator.MUST_PASS_ONE);
    // AND list for the INFO family: family must match plus any column
    // restrictions added below.
    FilterList infoFamilyFilters = new FilterList();
    // By default fetch everything in INFO column family.
    FamilyFilter matchInfoFamily = new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(EntityColumnFamily.INFO.getBytes()));
    infoFamilyFilters.addFilter(matchInfoFamily);
    boolean fetchPartialInfoCols = !isSingleEntityRead() && fetchPartialColsFromInfoFamily();
    if (fetchPartialInfoCols) {
        // We can fetch only some of the columns from info family.
        infoFamilyFilters.addFilter(createFilterListForColsOfInfoFamily());
    } else {
        // Exclude column prefixes in info column family which are not
        // required based on fields to retrieve.
        excludeFieldsFromInfoColFamily(infoFamilyFilters);
    }
    fieldFilters.addFilter(infoFamilyFilters);
    updateFilterForConfsAndMetricsToRetrieve(fieldFilters);
    return fieldFilters;
}
Use of org.apache.hadoop.hbase.filter.BinaryComparator in project hbase by apache:
class TestRegionServerReadRequestMetrics, method testReadRequestsCountWithFilter.
/** Runs {@code scan} against {@code table} and returns the number of rows it yields. */
private int countScanResults(Scan scan) throws Exception {
    int count = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result ignored : scanner) {
            count++;
        }
    }
    return count;
}

/**
 * Verifies read-request metrics when scans carry filters: a value filter,
 * a row filter that matches, and a row filter over a range that matches
 * nothing.
 */
@Test
public void testReadRequestsCountWithFilter() throws Exception {
    // Scan with a single-column value filter.
    Scan valueFilterScan = new Scan();
    valueFilterScan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
    testReadRequests(countScanResults(valueFilterScan), 2, 1);

    // Full-table scan with a row filter matching ROW1.
    Scan rowFilterScan = new Scan();
    rowFilterScan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
    testReadRequests(countScanResults(rowFilterScan), 1, 2);

    // Range scan [ROW2, ROW3) with a row filter for ROW1 — matches nothing.
    Scan rangeScan = new Scan(ROW2, ROW3);
    rangeScan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
    testReadRequests(countScanResults(rangeScan), 0, 1);

    // fixme filtered get should not increase readRequestsCount
    // Get get = new Get(ROW2);
    // get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
    // Result result = table.get(get);
    // resultCount = result.isEmpty() ? 0 : 1;
    // testReadRequests(resultCount, 0, 1);
}
Aggregations