Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project phoenix by apache.
From the class MetaDataEndpointImpl, method buildDeletedTable.
private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    // Raw scan so delete markers are returned; FirstKeyOnlyFilter keeps only the first cell per row.
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.<Cell>newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    for (Cell kv : results) {
        KeyValue.Type type = Type.codeToType(kv.getTypeByte());
        if (type == Type.DeleteFamily) {
            // Row was deleted: cache and return a deleted-table marker.
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PTable table = newDeletedTableMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, table);
            return table;
        }
    }
    return null;
}
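The method above pairs FirstKeyOnlyFilter with a raw scan: the raw scan makes delete markers visible, and the filter keeps only the first cell of each row, which is all that is needed to spot a DeleteFamily marker. Below is a minimal, self-contained sketch of that pattern against a plain HBase Table; the class DeletedRowChecker, the helper wasRowDeleted, and its parameters are hypothetical illustrations, not Phoenix code.

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class DeletedRowChecker {

    // Hypothetical helper: returns true if a DeleteFamily marker newer than minTimeStamp
    // exists for the given row. setRaw(true) keeps delete markers in the scan results;
    // FirstKeyOnlyFilter returns at most one cell per row, which is enough to inspect.
    static boolean wasRowDeleted(Table table, byte[] rowKey, long minTimeStamp) throws IOException {
        // Exclusive stop row just past rowKey, so the scan covers exactly one row.
        byte[] stopRow = Arrays.copyOf(rowKey, rowKey.length + 1);
        Scan scan = new Scan(rowKey, stopRow);
        scan.setFilter(new FirstKeyOnlyFilter());
        scan.setRaw(true);
        scan.setTimeRange(minTimeStamp, Long.MAX_VALUE);
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                for (Cell cell : result.rawCells()) {
                    if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.DeleteFamily) {
                        return true;
                    }
                }
            }
        }
        return false;
    }
}

This sketch only checks the first cell the scan returns per row; whether that cell is actually the delete marker depends on the table's layout, so treat it as an illustration of the scan setup rather than a drop-in replacement for Phoenix's logic.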
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project phoenix by apache.
From the class ExplainTable, method explain.
protected void explain(String prefix, List<String> planSteps) {
    StringBuilder buf = new StringBuilder(prefix);
    ScanRanges scanRanges = context.getScanRanges();
    Scan scan = context.getScan();
    if (scan.getConsistency() != Consistency.STRONG) {
        buf.append("TIMELINE-CONSISTENCY ");
    }
    if (hint.hasHint(Hint.SMALL)) {
        buf.append(Hint.SMALL).append(" ");
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        buf.append("REVERSE ");
    }
    if (scanRanges.isEverything()) {
        buf.append("FULL SCAN ");
    } else {
        explainSkipScan(buf);
    }
    buf.append("OVER ").append(tableRef.getTable().getPhysicalName().getString());
    if (!scanRanges.isPointLookup()) {
        appendKeyRanges(buf);
    }
    planSteps.add(buf.toString());
    if (context.getScan() != null && tableRef.getTable().getRowTimestampColPos() != -1) {
        TimeRange range = context.getScan().getTimeRange();
        planSteps.add(" ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")");
    }
    PageFilter pageFilter = null;
    FirstKeyOnlyFilter firstKeyOnlyFilter = null;
    BooleanExpressionFilter whereFilter = null;
    DistinctPrefixFilter distinctFilter = null;
    Iterator<Filter> filterIterator = ScanUtil.getFilterIterator(scan);
    if (filterIterator.hasNext()) {
        do {
            Filter filter = filterIterator.next();
            if (filter instanceof FirstKeyOnlyFilter) {
                firstKeyOnlyFilter = (FirstKeyOnlyFilter) filter;
            } else if (filter instanceof PageFilter) {
                pageFilter = (PageFilter) filter;
            } else if (filter instanceof BooleanExpressionFilter) {
                whereFilter = (BooleanExpressionFilter) filter;
            } else if (filter instanceof DistinctPrefixFilter) {
                distinctFilter = (DistinctPrefixFilter) filter;
            }
        } while (filterIterator.hasNext());
    }
    if (whereFilter != null) {
        planSteps.add(" SERVER FILTER BY " + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") + whereFilter.toString());
    } else if (firstKeyOnlyFilter != null) {
        planSteps.add(" SERVER FILTER BY FIRST KEY ONLY");
    }
    if (distinctFilter != null) {
        planSteps.add(" SERVER DISTINCT PREFIX FILTER OVER " + groupBy.getExpressions().toString());
    }
    if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) {
        // with GROUP BY, sort happens client-side
        planSteps.add(" SERVER" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString());
    } else {
        if (offset != null) {
            planSteps.add(" SERVER OFFSET " + offset);
        }
        if (pageFilter != null) {
            planSteps.add(" SERVER " + pageFilter.getPageSize() + " ROW LIMIT");
        }
    }
    Integer groupByLimit = null;
    byte[] groupByLimitBytes = scan.getAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT);
    if (groupByLimitBytes != null) {
        groupByLimit = (Integer) PInteger.INSTANCE.toObject(groupByLimitBytes);
    }
    groupBy.explain(planSteps, groupByLimit);
    if (scan.getAttribute(BaseScannerRegionObserver.SPECIFIC_ARRAY_INDEX) != null) {
        planSteps.add(" SERVER ARRAY ELEMENT PROJECTION");
    }
}
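The explain method walks the filters attached to the Scan (via Phoenix's internal ScanUtil.getFilterIterator) and turns the ones it recognizes, such as FirstKeyOnlyFilter and PageFilter, into plan steps. Below is a rough sketch of the same inspection using only the HBase client API; FilterExplainer and describeFilters are hypothetical names, flattening is done with a plain FilterList rather than Phoenix's iterator, and nested FilterLists are not handled.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;

public class FilterExplainer {

    // Hypothetical helper mirroring the pattern above: look at the filters on a Scan
    // and emit human-readable steps for the ones that change what the server returns.
    static List<String> describeFilters(Scan scan) {
        Filter filter = scan.getFilter();
        List<Filter> filters;
        if (filter instanceof FilterList) {
            filters = ((FilterList) filter).getFilters();
        } else if (filter != null) {
            filters = Collections.singletonList(filter);
        } else {
            filters = Collections.emptyList();
        }
        FirstKeyOnlyFilter firstKeyOnlyFilter = null;
        PageFilter pageFilter = null;
        for (Filter f : filters) {
            if (f instanceof FirstKeyOnlyFilter) {
                firstKeyOnlyFilter = (FirstKeyOnlyFilter) f;
            } else if (f instanceof PageFilter) {
                pageFilter = (PageFilter) f;
            }
        }
        List<String> planSteps = new ArrayList<>();
        if (firstKeyOnlyFilter != null) {
            planSteps.add("SERVER FILTER BY FIRST KEY ONLY");
        }
        if (pageFilter != null) {
            planSteps.add("SERVER " + pageFilter.getPageSize() + " ROW LIMIT");
        }
        return planSteps;
    }
}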
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project cdap by caskdata.
From the class HBaseMetadataTable, method scanTopics.
/**
* Scans the HBase table to get a list of {@link TopicId}.
*/
private List<TopicId> scanTopics(ScanBuilder scanBuilder) throws IOException {
    Scan scan = scanBuilder.setFilter(new FirstKeyOnlyFilter()).setCaching(scanCacheRows).build();
    try {
        List<TopicId> topicIds = new ArrayList<>();
        try (ResultScanner resultScanner = hTable.getScanner(scan)) {
            for (Result result : resultScanner) {
                TopicId topicId = MessagingUtils.toTopicId(result.getRow());
                byte[] value = result.getValue(columnFamily, COL);
                Map<String, String> properties = GSON.fromJson(Bytes.toString(value), MAP_TYPE);
                TopicMetadata metadata = new TopicMetadata(topicId, properties);
                if (metadata.exists()) {
                    topicIds.add(topicId);
                }
            }
        }
        return topicIds;
    } catch (IOException e) {
        throw exceptionHandler.handle(e);
    }
}
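Here FirstKeyOnlyFilter, together with scanner caching, keeps the topic scan cheap: at most one cell per row travels back to the client. When only row keys are needed at all, FirstKeyOnlyFilter is commonly combined with KeyOnlyFilter so that cell values are stripped as well. The sketch below illustrates that combination; RowKeyLister, listRowKeys, and the cachingRows parameter are hypothetical names, not CDAP code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

public class RowKeyLister {

    // Hypothetical helper: list the row keys of a table while transferring as little
    // data as possible. FirstKeyOnlyFilter keeps one cell per row and KeyOnlyFilter
    // strips the cell value, so essentially only keys travel over the wire.
    static List<byte[]> listRowKeys(Connection connection, TableName tableName, int cachingRows) throws IOException {
        Scan scan = new Scan();
        scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new KeyOnlyFilter()));
        scan.setCaching(cachingRows);
        List<byte[]> rowKeys = new ArrayList<>();
        try (Table table = connection.getTable(tableName);
             ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                rowKeys.add(result.getRow());
            }
        }
        return rowKeys;
    }
}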