Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.
The class QuotaTableUtil, method makeFilter.
/**
 * Converts the given QuotaFilter into a serializable FilterList.
 */
public static Filter makeFilter(final QuotaFilter filter) {
  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  if (!Strings.isEmpty(filter.getUserFilter())) {
    FilterList userFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    boolean hasFilter = false;
    if (!Strings.isEmpty(filter.getNamespaceFilter())) {
      FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
      nsFilters.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
          new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
      nsFilters.addFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL,
          new RegexStringComparator(
              getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0)));
      userFilters.addFilter(nsFilters);
      hasFilter = true;
    }
    if (!Strings.isEmpty(filter.getTableFilter())) {
      FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
      tableFilters.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
          new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
      tableFilters.addFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL,
          new RegexStringComparator(
              getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0)));
      userFilters.addFilter(tableFilters);
      hasFilter = true;
    }
    if (!hasFilter) {
      userFilters.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
          new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
    }
    filterList.addFilter(userFilters);
  } else if (!Strings.isEmpty(filter.getTableFilter())) {
    filterList.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
        new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0)));
  } else if (!Strings.isEmpty(filter.getNamespaceFilter())) {
    filterList.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
        new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0)));
  }
  return filterList;
}
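For context, a minimal usage sketch of the method above. It assumes the fluent QuotaFilter setters and the QuotaTableUtil.QUOTA_TABLE_NAME constant from the same package; the regex values and the open connection are illustrative only.

// Build a QuotaFilter, convert it to a Filter, and scan the quota table.
QuotaFilter quotaFilter = new QuotaFilter()
    .setUserFilter("bob.*")       // illustrative user regex
    .setTableFilter("mytable");   // illustrative table regex
Scan scan = new Scan();
scan.setFilter(QuotaTableUtil.makeFilter(quotaFilter));
try (Table table = connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME);
     ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    // Each matching row carries quota settings for a user, table or namespace.
  }
}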
Use of org.apache.hadoop.hbase.filter.FilterList in project hadoop by apache.
The class FlowRunEntityReader, method constructFilterListBasedOnFields.
@Override
protected FilterList constructFilterListBasedOnFields() throws IOException {
  FilterList list = new FilterList(Operator.MUST_PASS_ONE);
  // By default fetch everything in the INFO column family.
  FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL,
      new BinaryComparator(FlowRunColumnFamily.INFO.getBytes()));
  TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
  // Metrics are always returned if we are reading a single entity.
  if (!isSingleEntityRead()
      && !hasField(dataToRetrieve.getFieldsToRetrieve(), Field.METRICS)) {
    FilterList infoColFamilyList = new FilterList(Operator.MUST_PASS_ONE);
    infoColFamilyList.addFilter(infoColumnFamily);
    infoColFamilyList.addFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
        new BinaryPrefixComparator(
            FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(""))));
    list.addFilter(infoColFamilyList);
  } else {
    // Check if metricsToRetrieve are specified and if they are, create a
    // filter list for the info column family by adding the flow run table
    // columns and a list for the metrics to retrieve. Please note that
    // fieldsToRetrieve will have METRICS added to it if metricsToRetrieve
    // are specified (in augmentParams()).
    TimelineFilterList metricsToRetrieve = dataToRetrieve.getMetricsToRetrieve();
    if (metricsToRetrieve != null && !metricsToRetrieve.getFilterList().isEmpty()) {
      FilterList infoColFamilyList = new FilterList();
      infoColFamilyList.addFilter(infoColumnFamily);
      FilterList columnsList = updateFixedColumns();
      columnsList.addFilter(TimelineFilterUtils.createHBaseFilterList(
          FlowRunColumnPrefix.METRIC, metricsToRetrieve));
      infoColFamilyList.addFilter(columnsList);
      list.addFilter(infoColFamilyList);
    }
  }
  return list;
}
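Since every snippet on this page leans on the same two FilterList operators, here is a minimal, self-contained sketch of their semantics. The family name "i" and the qualifier prefix "m!" are made-up stand-ins, not the real flow run schema constants.

// MUST_PASS_ALL behaves like a logical AND over its member filters,
// MUST_PASS_ONE like a logical OR.
FilterList andBranch = new FilterList(FilterList.Operator.MUST_PASS_ALL);
andBranch.addFilter(new FamilyFilter(CompareFilter.CompareOp.EQUAL,
    new BinaryComparator(Bytes.toBytes("i"))));
andBranch.addFilter(new QualifierFilter(CompareFilter.CompareOp.NOT_EQUAL,
    new BinaryPrefixComparator(Bytes.toBytes("m!"))));

FilterList orList = new FilterList(FilterList.Operator.MUST_PASS_ONE);
orList.addFilter(andBranch);
orList.addFilter(new FamilyFilter(CompareFilter.CompareOp.EQUAL,
    new BinaryComparator(Bytes.toBytes("c"))));

Scan scan = new Scan();
// A cell is kept if it satisfies either branch of the outer OR list.
scan.setFilter(orList);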
Use of org.apache.hadoop.hbase.filter.FilterList in project hadoop by apache.
The class FlowRunEntityReader, method getResults.
@Override
protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
    FilterList filterList) throws IOException {
  Scan scan = new Scan();
  TimelineReaderContext context = getContext();
  RowKeyPrefix<FlowRunRowKey> flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(
      context.getClusterId(), context.getUserId(), context.getFlowName());
  scan.setRowPrefixFilter(flowRunRowKeyPrefix.getRowKeyPrefix());
  FilterList newList = new FilterList();
  newList.addFilter(new PageFilter(getFilters().getLimit()));
  if (filterList != null && !filterList.getFilters().isEmpty()) {
    newList.addFilter(filterList);
  }
  scan.setFilter(newList);
  scan.setMaxVersions(Integer.MAX_VALUE);
  return getTable().getResultScanner(hbaseConf, conn, scan);
}
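One caveat worth a sketch: PageFilter is evaluated independently on each region server, so a scan like the one above can still return more rows than the requested limit in total, and callers typically enforce the final limit client-side. A condensed version of the scan setup, with a made-up row key prefix and limit:

Scan scan = new Scan();
scan.setRowPrefixFilter(Bytes.toBytes("cluster!user!flow!")); // illustrative prefix
scan.setFilter(new PageFilter(100));    // caps rows per region, not globally
scan.setMaxVersions(Integer.MAX_VALUE); // deliberately read all cell versions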
Use of org.apache.hadoop.hbase.filter.FilterList in project hadoop by apache.
The class GenericEntityReader, method constructFilterListBasedOnFields.
@Override
protected FilterList constructFilterListBasedOnFields() throws IOException {
  if (!needCreateFilterListBasedOnFields()) {
    // Fetch all the columns. No need for a filter.
    return null;
  }
  FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
  FilterList infoColFamilyList = new FilterList();
  // By default fetch everything in the INFO column family.
  FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL,
      new BinaryComparator(EntityColumnFamily.INFO.getBytes()));
  infoColFamilyList.addFilter(infoColumnFamily);
  if (!isSingleEntityRead() && fetchPartialColsFromInfoFamily()) {
    // We can fetch only some of the columns from the info family.
    infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
  } else {
    // Exclude column prefixes in the info column family which are not
    // required based on the fields to retrieve.
    excludeFieldsFromInfoColFamily(infoColFamilyList);
  }
  listBasedOnFields.addFilter(infoColFamilyList);
  updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
  return listBasedOnFields;
}
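A short sketch of the calling convention this implies: a null return means "no filter, fetch everything", so the result is only attached to the Scan when present. The surrounding reader code is assumed, not shown here.

FilterList listBasedOnFields = constructFilterListBasedOnFields();
Scan scan = new Scan();
if (listBasedOnFields != null && !listBasedOnFields.getFilters().isEmpty()) {
  scan.setFilter(listBasedOnFields);
}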
Use of org.apache.hadoop.hbase.filter.FilterList in project hadoop by apache.
The class TimelineEntityReader, method createFiltersFromColumnQualifiers.
/**
 * Create a filter list of qualifier filters based on the passed set of columns.
 *
 * @param <T> Describes the type of column prefix.
 * @param colPrefix Column prefix.
 * @param columns Set of column qualifiers.
 * @return filter list.
 */
protected <T> FilterList createFiltersFromColumnQualifiers(
    ColumnPrefix<T> colPrefix, Set<String> columns) {
  FilterList list = new FilterList(Operator.MUST_PASS_ONE);
  for (String column : columns) {
    // For columns which have compound column qualifiers (e.g. events), we
    // need to include the required separator.
    byte[] compoundColQual = createColQualifierPrefix(colPrefix, column);
    list.addFilter(new QualifierFilter(CompareOp.EQUAL,
        new BinaryPrefixComparator(
            colPrefix.getColumnPrefixBytes(compoundColQual))));
  }
  return list;
}
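A hypothetical call, assuming the EntityColumnPrefix.EVENT column prefix and two illustrative event ids. Because the list uses MUST_PASS_ONE and each member is a QualifierFilter over a BinaryPrefixComparator, a cell passes if its qualifier starts with any of the requested prefixes.

Set<String> eventColumns = new HashSet<>(Arrays.asList("started", "finished"));
FilterList eventFilters =
    createFiltersFromColumnQualifiers(EntityColumnPrefix.EVENT, eventColumns);
Scan scan = new Scan();
scan.setFilter(eventFilters);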