Usage of org.apache.phoenix.filter.DistinctPrefixFilter in the Apache Phoenix project.
From class BaseResultIterators, method initializeScan:
/**
 * Configures the HBase {@code Scan} for the given query plan: column-family/column
 * projection, pushed-down filters (FirstKeyOnlyFilter, PageFilter, DistinctPrefixFilter),
 * the row-key offset, and the scan attributes read by the server-side region observers.
 *
 * @param plan the compiled query plan whose context, table and projector drive the setup
 * @param perScanLimit per-scan row limit pushed down as a PageFilter, or null for no limit
 * @param offset row-key offset (for salted/tenant tables — TODO confirm exact semantics), or null
 * @param scan the Scan to mutate in place
 * @throws SQLException if serialization of scan state fails
 */
private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
StatementContext context = plan.getContext();
TableRef tableRef = plan.getTableRef();
PTable table = tableRef.getTable();
Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
// Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
// We project *all* KeyValues across all column families as we make a pass over
// a physical table and we want to make sure we catch all KeyValues that may be
// dynamic or part of an updatable view.
familyMap.clear();
scan.setMaxVersions();
// Remove any filter
scan.setFilter(null);
// Traverse (and subsequently clone) all KeyValues
scan.setRaw(true);
// Pass over PTable so we can re-write rows according to the row key schema
scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserver.serialize(table));
} else {
// Normal (non-upgrade) path: set up projection and filters for this query.
FilterableStatement statement = plan.getStatement();
RowProjector projector = plan.getProjector();
boolean optimizeProjection = false;
// True when nothing projects or filters on key-value columns, i.e. the
// row key alone can answer the query.
boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
if (!projector.projectEverything()) {
// not match the actual column families of the table (which is bad).
if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
// Project the one column family. We must project a column family since it's possible
// that there are other non declared column families that we need to ignore.
scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
} else {
// Defer projection optimization until the end of this method, after
// filters and attributes have been set.
optimizeProjection = true;
if (projector.projectEveryRow()) {
if (table.getViewType() == ViewType.MAPPED) {
// Since we don't have the empty key value in MAPPED tables,
// we must project all CFs in HRS. However, only the
// selected column values are returned back to client.
context.getWhereConditionColumns().clear();
for (PColumnFamily family : table.getColumnFamilies()) {
context.addWhereConditionColumn(family.getName().getBytes(), null);
}
} else {
byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
// been projected in its entirety.
if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
// Project the empty key value so every row is surfaced even when
// no other column of that family is selected.
scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
}
}
}
}
}
// Add FirstKeyOnlyFilter if there are no references to key value columns
if (keyOnlyFilter) {
ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
}
if (perScanLimit != null) {
ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
}
if (offset != null) {
ScanUtil.addOffsetAttribute(scan, offset);
}
int cols = plan.getGroupBy().getOrderPreservingColumnCount();
// Push down a DistinctPrefixFilter when grouping on a strict row-key prefix:
// requires key-only access, no RANGE_SCAN hint, a proper prefix (cols < full
// row key), an order-preserving GROUP BY, and either no aggregation or an
// ungrouped aggregate (e.g. COUNT(DISTINCT ...) — TODO confirm).
if (cols > 0 && keyOnlyFilter && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() && plan.getGroupBy().isOrderPreserving() && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
ScanUtil.andFilterAtEnd(scan, new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
if (plan.getLimit() != null) {
// We can push the limit to the server
ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
}
}
// Tell the server which qualifier/storage encoding schemes this table uses.
scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
// we use this flag on the server side to determine which value column qualifier to use in the key value we return from server.
scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
// So there is no point setting the range.
if (!ScanUtil.isAnalyzeTable(scan)) {
setQualifierRanges(keyOnlyFilter, table, scan, context);
}
if (optimizeProjection) {
optimizeProjection(context, scan, table, statement);
}
}
}
Usage of org.apache.phoenix.filter.DistinctPrefixFilter in the Apache Phoenix project.
From class ScanUtil, method setRowKeyOffset:
/**
 * Propagates a row-key offset to the given filter, dispatching on the concrete
 * filter type. Filters of any other type are left untouched.
 *
 * @param filter the scan filter to adjust; checked in order against the known types
 * @param offset the number of leading row-key bytes to skip
 */
private static void setRowKeyOffset(Filter filter, int offset) {
    // Expression-backed filters carry the offset on their expression tree.
    if (filter instanceof BooleanExpressionFilter) {
        IndexUtil.setRowKeyExpressionOffset(((BooleanExpressionFilter) filter).getExpression(), offset);
        return;
    }
    // Skip-scan and distinct-prefix filters accept the offset directly.
    if (filter instanceof SkipScanFilter) {
        ((SkipScanFilter) filter).setOffset(offset);
        return;
    }
    if (filter instanceof DistinctPrefixFilter) {
        ((DistinctPrefixFilter) filter).setOffset(offset);
    }
}
Usage of org.apache.phoenix.filter.DistinctPrefixFilter in the Apache Phoenix project.
From class ExplainTable, method explain:
/**
 * Appends the EXPLAIN plan steps for this table access to {@code planSteps}:
 * the scan description line (consistency, hints, direction, scan type, table,
 * key ranges), followed by filter, distinct-prefix, sort/limit, group-by and
 * array-projection detail lines derived from the Scan's filters and attributes.
 *
 * @param prefix text prepended to the first plan line (e.g. "CLIENT ..." — TODO confirm)
 * @param planSteps output list the plan lines are appended to
 */
protected void explain(String prefix, List<String> planSteps) {
StringBuilder buf = new StringBuilder(prefix);
ScanRanges scanRanges = context.getScanRanges();
Scan scan = context.getScan();
if (scan.getConsistency() != Consistency.STRONG) {
buf.append("TIMELINE-CONSISTENCY ");
}
if (hint.hasHint(Hint.SMALL)) {
buf.append(Hint.SMALL).append(" ");
}
if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
buf.append("REVERSE ");
}
if (scanRanges.isEverything()) {
buf.append("FULL SCAN ");
} else {
explainSkipScan(buf);
}
buf.append("OVER ").append(tableRef.getTable().getPhysicalName().getString());
if (!scanRanges.isPointLookup()) {
appendKeyRanges(buf);
}
planSteps.add(buf.toString());
// Row-timestamp tables expose the scan's time range as an explicit filter line.
if (context.getScan() != null && tableRef.getTable().getRowTimestampColPos() != -1) {
TimeRange range = context.getScan().getTimeRange();
planSteps.add("    ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")");
}
// Walk the scan's filter list once, remembering the last instance of each
// filter type we know how to describe.
PageFilter pageFilter = null;
FirstKeyOnlyFilter firstKeyOnlyFilter = null;
BooleanExpressionFilter whereFilter = null;
DistinctPrefixFilter distinctFilter = null;
Iterator<Filter> filterIterator = ScanUtil.getFilterIterator(scan);
if (filterIterator.hasNext()) {
do {
Filter filter = filterIterator.next();
if (filter instanceof FirstKeyOnlyFilter) {
firstKeyOnlyFilter = (FirstKeyOnlyFilter) filter;
} else if (filter instanceof PageFilter) {
pageFilter = (PageFilter) filter;
} else if (filter instanceof BooleanExpressionFilter) {
whereFilter = (BooleanExpressionFilter) filter;
} else if (filter instanceof DistinctPrefixFilter) {
distinctFilter = (DistinctPrefixFilter) filter;
}
} while (filterIterator.hasNext());
}
if (whereFilter != null) {
planSteps.add("    SERVER FILTER BY " + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") + whereFilter.toString());
} else if (firstKeyOnlyFilter != null) {
planSteps.add("    SERVER FILTER BY FIRST KEY ONLY");
}
if (distinctFilter != null) {
planSteps.add("    SERVER DISTINCT PREFIX FILTER OVER " + groupBy.getExpressions().toString());
}
if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) {
// with GROUP BY, sort happens client-side
planSteps.add("    SERVER" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString());
} else {
if (offset != null) {
planSteps.add("    SERVER OFFSET " + offset);
}
if (pageFilter != null) {
planSteps.add("    SERVER " + pageFilter.getPageSize() + " ROW LIMIT");
}
}
// A pushed-down GROUP BY limit is carried as a scan attribute, not a filter.
Integer groupByLimit = null;
byte[] groupByLimitBytes = scan.getAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT);
if (groupByLimitBytes != null) {
groupByLimit = (Integer) PInteger.INSTANCE.toObject(groupByLimitBytes);
}
groupBy.explain(planSteps, groupByLimit);
if (scan.getAttribute(BaseScannerRegionObserver.SPECIFIC_ARRAY_INDEX) != null) {
planSteps.add("    SERVER ARRAY ELEMENT PROJECTION");
}
}
Aggregations