Use of org.apache.phoenix.iterate.MergeSortTopNResultIterator in project phoenix by apache.
In class ScanPlan, method newIterator:
@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    // Set any scan attributes before creating the scanner, as it will be too late afterwards
    scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
    ResultIterator scanner;
    TableRef tableRef = this.getTableRef();
    PTable table = tableRef.getTable();
    boolean isSalted = table.getBucketNum() != null;
    /* If there is no limit or topN, use a parallel iterator so that we get results faster.
     * Otherwise, if a limit is provided, run the query serially.
     */
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
    boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
    /*
     * For queries that order by the row key and cannot possibly query more than a
     * threshold worth of data, we only need to initialize scanners corresponding to the
     * first (or last, if reverse) scan per region.
     */
    boolean initFirstScanOnly =
            (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY)
            && isDataToScanWithinThreshold;
    BaseResultIterators iterators;
    if (isOffsetOnServer) {
        iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan);
    } else if (isSerial) {
        iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan);
    } else {
        iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly);
    }
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    if (isOffsetOnServer) {
        scanner = new ConcatResultIterator(iterators);
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    } else if (isOrdered) {
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
            /*
             * For salted tables or local indexes, a merge sort is needed if:
             * 1) the config phoenix.query.force.rowkeyorder is set to true, or
             * 2) the query has an ORDER BY that wants to sort the results
             *    by the row key (forward or reverse ordering).
             */
            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
        } else if (useRoundRobinIterator()) {
            /*
             * For any kind of table, round robin is possible when no
             * ordering of rows is needed.
             */
            scanner = new RoundRobinResultIterator(iterators, this);
        } else {
            scanner = new ConcatResultIterator(iterators);
        }
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
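The unordered branch above builds the final scanner by layering decorators over the base iterator: the offset wrapper is applied before the limit wrapper, so the limit counts rows that come after the skipped prefix. Below is a minimal, self-contained sketch of that wrapping pattern; the RowIterator, ListRowIterator, OffsetIterator, and LimitIterator names are hypothetical simplified stand-ins for Phoenix's ResultIterator hierarchy, not the actual Phoenix classes.

import java.util.Iterator;
import java.util.List;

// Simplified sketch of iterator chaining, mirroring how ScanPlan wraps the base
// scanner with an offset decorator and then a limit decorator.
final class IteratorChainSketch {

    interface RowIterator {
        String next(); // returns null when exhausted (simplified contract)
    }

    static final class ListRowIterator implements RowIterator {
        private final Iterator<String> delegate;
        ListRowIterator(List<String> rows) { this.delegate = rows.iterator(); }
        public String next() { return delegate.hasNext() ? delegate.next() : null; }
    }

    // Skips the first 'offset' rows, then passes everything else through.
    static final class OffsetIterator implements RowIterator {
        private final RowIterator inner;
        private int remainingToSkip;
        OffsetIterator(RowIterator inner, int offset) { this.inner = inner; this.remainingToSkip = offset; }
        public String next() {
            while (remainingToSkip > 0 && inner.next() != null) {
                remainingToSkip--;
            }
            return inner.next();
        }
    }

    // Returns at most 'limit' rows from the inner iterator.
    static final class LimitIterator implements RowIterator {
        private final RowIterator inner;
        private int remaining;
        LimitIterator(RowIterator inner, int limit) { this.inner = inner; this.remaining = limit; }
        public String next() {
            if (remaining <= 0) return null;
            remaining--;
            return inner.next();
        }
    }

    public static void main(String[] args) {
        RowIterator scanner = new ListRowIterator(List.of("r1", "r2", "r3", "r4", "r5"));
        // Same wrapping order as in newIterator(): offset first, then limit.
        scanner = new OffsetIterator(scanner, 2);
        scanner = new LimitIterator(scanner, 2);
        for (String row = scanner.next(); row != null; row = scanner.next()) {
            System.out.println(row); // prints r3, r4
        }
    }
}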
Use of org.apache.phoenix.iterate.MergeSortTopNResultIterator in project phoenix by apache.
In class UnionPlan, method iterator:
@Override
public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    this.iterators = new UnionResultIterators(plans, parentContext);
    ResultIterator scanner;
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    if (isOrdered) {
        // TopN
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        scanner = new ConcatResultIterator(iterators);
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    return scanner;
}
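In both plans, MergeSortTopNResultIterator is what turns several independently sorted sub-iterators (one per parallel scan in ScanPlan, one per union branch in UnionPlan) into a single ordered stream while honoring offset and limit. The following is a conceptual sketch of that idea under the assumption that each source is already sorted; the class and method names and the PriorityQueue-based merge are illustrative only, not Phoenix's actual implementation.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

// Conceptual k-way merge over pre-sorted sources with offset + limit,
// illustrating what a merge-sort top-N iterator does; not Phoenix code.
final class MergeSortTopNSketch {

    // One heap entry per source: the source's current smallest value plus its iterator.
    private static final class Head {
        final int value;
        final Iterator<Integer> rest;
        Head(int value, Iterator<Integer> rest) { this.value = value; this.rest = rest; }
    }

    static List<Integer> mergeTopN(List<List<Integer>> sortedSources, int offset, int limit) {
        PriorityQueue<Head> heap = new PriorityQueue<>(Comparator.comparingInt((Head h) -> h.value));
        for (List<Integer> source : sortedSources) {
            Iterator<Integer> it = source.iterator();
            if (it.hasNext()) {
                heap.add(new Head(it.next(), it));
            }
        }
        List<Integer> out = new ArrayList<>();
        int skipped = 0;
        while (!heap.isEmpty() && out.size() < limit) {
            Head smallest = heap.poll();
            if (skipped < offset) {
                skipped++;                 // rows before the offset are discarded
            } else {
                out.add(smallest.value);   // rows after the offset count toward the limit
            }
            if (smallest.rest.hasNext()) {
                heap.add(new Head(smallest.rest.next(), smallest.rest));
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // Three already-sorted sub-results, e.g. one per parallel scan or union branch.
        List<List<Integer>> sources = List.of(
                List.of(1, 4, 7), List.of(2, 5, 8), List.of(3, 6, 9));
        System.out.println(mergeTopN(sources, 2, 4)); // prints [3, 4, 5, 6]
    }
}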