
Example 1 with ConcatResultIterator

Use of org.apache.phoenix.iterate.ConcatResultIterator in project phoenix by apache.

From the class AggregatePlan, method newIterator:

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (groupBy.isEmpty()) {
        UngroupedAggregateRegionObserver.serializeIntoScan(scan);
    } else {
        // Set attribute with serialized expressions for coprocessor
        GroupedAggregateRegionObserver.serializeIntoScan(scan, groupBy.getScanAttribName(), groupBy.getKeyExpressions());
        if (limit != null && orderBy.getOrderByExpressions().isEmpty() && having == null && ((statement.isDistinct() && !statement.isAggregate()) || (!statement.isDistinct() && (context.getAggregationManager().isEmpty() || BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS.equals(groupBy.getScanAttribName()))))) {
            /*
                 * Optimization to early exit from the scan for a GROUP BY or DISTINCT with a LIMIT.
                 * We may exit early according to the LIMIT specified if the query has:
                 * 1) No ORDER BY clause (or the ORDER BY was optimized out). We cannot exit
                 *    early if there's an ORDER BY because the first group may be found last
                 *    in the scan.
                 * 2) No HAVING clause, since we execute the HAVING on the client side. The LIMIT
                 *    needs to be evaluated *after* the HAVING.
                 * 3) DISTINCT clause with no GROUP BY. We cannot exit early if there's a
                 *    GROUP BY, as the GROUP BY is processed on the client-side post aggregation
                 *    if a DISTINCT has a GROUP BY. Otherwise, since there are no aggregate
                 *    functions in a DISTINCT, we can exit early regardless of whether the
                 *    groups are in row key order or unordered.
                 * 4) GROUP BY clause with no aggregate functions. This is in the same category
                 *    as (3). If we're using aggregate functions, we need to look at all the
                 *    rows, as otherwise we'd exit early with incorrect aggregate function
                 *    calculations.
                 * 5) GROUP BY clause along the pk axis, as the rows are processed in row key
                 *    order, so we can early exit, even when aggregate functions are used, as
                 *    the rows in the group are contiguous.
                 */
            scan.setAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT, PInteger.INSTANCE.toBytes(limit + (offset == null ? 0 : offset)));
        }
    }
    BaseResultIterators iterators = isSerial ? new SerialIterators(this, null, null, wrapParallelIteratorFactory(), scanGrouper, scan) : new ParallelIterators(this, null, wrapParallelIteratorFactory(), scan, false);
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    AggregatingResultIterator aggResultIterator;
    // No need to merge sort for ungrouped aggregation
    if (groupBy.isEmpty() || groupBy.isUngroupedAggregate()) {
        aggResultIterator = new UngroupedAggregatingResultIterator(new ConcatResultIterator(iterators), aggregators);
    // If the table is salted or a local index, we still need a merge sort, as we may have multiple group-by keys that aren't contiguous.
    } else if (groupBy.isOrderPreserving() && !(this.getTableRef().getTable().getBucketNum() != null || this.getTableRef().getTable().getIndexType() == IndexType.LOCAL)) {
        aggResultIterator = new RowKeyOrderedAggregateResultIterator(iterators, aggregators);
    } else {
        aggResultIterator = new GroupedAggregatingResultIterator(new MergeSortRowKeyResultIterator(iterators, 0, this.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY), aggregators);
    }
    if (having != null) {
        aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having);
    }
    if (statement.isDistinct() && statement.isAggregate()) {
        // Dedup on client if select distinct and aggregation
        aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector());
    }
    ResultIterator resultScanner = aggResultIterator;
    if (orderBy.getOrderByExpressions().isEmpty()) {
        if (offset != null) {
            resultScanner = new OffsetResultIterator(aggResultIterator, offset);
        }
        if (limit != null) {
            resultScanner = new LimitingResultIterator(resultScanner, limit);
        }
    } else {
        int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
        resultScanner = new OrderedAggregatingResultIterator(aggResultIterator, orderBy.getOrderByExpressions(), thresholdBytes, limit, offset);
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager());
    }
    return resultScanner;
}
Also used : ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) MergeSortRowKeyResultIterator(org.apache.phoenix.iterate.MergeSortRowKeyResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) GroupedAggregatingResultIterator(org.apache.phoenix.iterate.GroupedAggregatingResultIterator) SerialIterators(org.apache.phoenix.iterate.SerialIterators) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) RowKeyOrderedAggregateResultIterator(org.apache.phoenix.iterate.RowKeyOrderedAggregateResultIterator) OrderedResultIterator(org.apache.phoenix.iterate.OrderedResultIterator) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) UngroupedAggregatingResultIterator(org.apache.phoenix.iterate.UngroupedAggregatingResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) AggregatingResultIterator(org.apache.phoenix.iterate.AggregatingResultIterator) FilterAggregatingResultIterator(org.apache.phoenix.iterate.FilterAggregatingResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) DistinctAggregatingResultIterator(org.apache.phoenix.iterate.DistinctAggregatingResultIterator) PeekingResultIterator(org.apache.phoenix.iterate.PeekingResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) OrderedAggregatingResultIterator(org.apache.phoenix.iterate.OrderedAggregatingResultIterator) BaseResultIterators(org.apache.phoenix.iterate.BaseResultIterators)
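The block comment above describes the early-exit optimization: when a GROUP BY or DISTINCT query carries a LIMIT but no ORDER BY or HAVING, the plan sets the GROUP_BY_LIMIT scan attribute so the server can stop scanning once enough groups have been produced. The sketch below is a rough, hedged illustration of a query with that shape, issued over the standard Phoenix JDBC driver; the METRICS table, its HOST column, and the connection URL are hypothetical, and the plan actually chosen also depends on table layout and statistics.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class GroupByLimitSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection; adjust the ZooKeeper quorum for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // GROUP BY with a LIMIT and neither ORDER BY nor HAVING: the query shape for which
            // AggregatePlan.newIterator may set BaseScannerRegionObserver.GROUP_BY_LIMIT.
            String sql = "SELECT HOST, COUNT(*) FROM METRICS GROUP BY HOST LIMIT 10";
            // Print the plan so the server-side and client-side aggregation steps can be inspected.
            try (ResultSet rs = stmt.executeQuery("EXPLAIN " + sql)) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
            try (ResultSet rs = stmt.executeQuery(sql)) {
                while (rs.next()) {
                    System.out.println(rs.getString(1) + " -> " + rs.getLong(2));
                }
            }
        }
    }
}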

Example 2 with ConcatResultIterator

Use of org.apache.phoenix.iterate.ConcatResultIterator in project phoenix by apache.

From the class AggregatePlan, method newIterator:

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, Map<ImmutableBytesPtr, ServerCache> caches) throws SQLException {
    if (groupBy.isEmpty()) {
        UngroupedAggregateRegionObserver.serializeIntoScan(scan);
    } else {
        // Set attribute with serialized expressions for coprocessor
        GroupedAggregateRegionObserver.serializeIntoScan(scan, groupBy.getScanAttribName(), groupBy.getKeyExpressions());
        if (limit != null && orderBy.getOrderByExpressions().isEmpty() && having == null && ((statement.isDistinct() && !statement.isAggregate()) || (!statement.isDistinct() && (context.getAggregationManager().isEmpty() || BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS.equals(groupBy.getScanAttribName()))))) {
            /*
                 * Optimization to early exit from the scan for a GROUP BY or DISTINCT with a LIMIT.
                 * We may exit early according to the LIMIT specified if the query has:
                 * 1) No ORDER BY clause (or the ORDER BY was optimized out). We cannot exit
                 *    early if there's an ORDER BY because the first group may be found last
                 *    in the scan.
                 * 2) No HAVING clause, since we execute the HAVING on the client side. The LIMIT
                 *    needs to be evaluated *after* the HAVING.
                 * 3) DISTINCT clause with no GROUP BY. We cannot exit early if there's a
                 *    GROUP BY, as the GROUP BY is processed on the client-side post aggregation
                 *    if a DISTINCT has a GROUP BY. Otherwise, since there are no aggregate
                 *    functions in a DISTINCT, we can exit early regardless of whether the
                 *    groups are in row key order or unordered.
                 * 4) GROUP BY clause with no aggregate functions. This is in the same category
                 *    as (3). If we're using aggregate functions, we need to look at all the
                 *    rows, as otherwise we'd exit early with incorrect aggregate function
                 *    calculations.
                 * 5) GROUP BY clause along the pk axis, as the rows are processed in row key
                 *    order, so we can early exit, even when aggregate functions are used, as
                 *    the rows in the group are contiguous.
                 */
            scan.setAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT, PInteger.INSTANCE.toBytes(limit + (offset == null ? 0 : offset)));
        }
    }
    BaseResultIterators iterators = isSerial ? new SerialIterators(this, null, null, wrapParallelIteratorFactory(), scanGrouper, scan, caches, dataPlan) : new ParallelIterators(this, null, wrapParallelIteratorFactory(), scan, false, caches, dataPlan);
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    estimateInfoTimestamp = iterators.getEstimateInfoTimestamp();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    AggregatingResultIterator aggResultIterator;
    // No need to merge sort for ungrouped aggregation
    if (groupBy.isEmpty() || groupBy.isUngroupedAggregate()) {
        aggResultIterator = new UngroupedAggregatingResultIterator(new ConcatResultIterator(iterators), aggregators);
    // If the table is salted or a local index, we still need a merge sort, as we may have multiple group-by keys that aren't contiguous.
    } else if (groupBy.isOrderPreserving() && !(this.getTableRef().getTable().getBucketNum() != null || this.getTableRef().getTable().getIndexType() == IndexType.LOCAL)) {
        aggResultIterator = new RowKeyOrderedAggregateResultIterator(iterators, aggregators);
    } else {
        aggResultIterator = new GroupedAggregatingResultIterator(new MergeSortRowKeyResultIterator(iterators, 0, this.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY), aggregators);
    }
    if (having != null) {
        aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having);
    }
    if (statement.isDistinct() && statement.isAggregate()) {
        // Dedup on client if select distinct and aggregation
        aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector());
    }
    ResultIterator resultScanner = aggResultIterator;
    if (orderBy.getOrderByExpressions().isEmpty()) {
        if (offset != null) {
            resultScanner = new OffsetResultIterator(aggResultIterator, offset);
        }
        if (limit != null) {
            resultScanner = new LimitingResultIterator(resultScanner, limit);
        }
    } else {
        int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
        resultScanner = new OrderedAggregatingResultIterator(aggResultIterator, orderBy.getOrderByExpressions(), thresholdBytes, limit, offset);
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager());
    }
    return resultScanner;
}
Also used : ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) MergeSortRowKeyResultIterator(org.apache.phoenix.iterate.MergeSortRowKeyResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) GroupedAggregatingResultIterator(org.apache.phoenix.iterate.GroupedAggregatingResultIterator) SerialIterators(org.apache.phoenix.iterate.SerialIterators) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) RowKeyOrderedAggregateResultIterator(org.apache.phoenix.iterate.RowKeyOrderedAggregateResultIterator) OrderedResultIterator(org.apache.phoenix.iterate.OrderedResultIterator) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) UngroupedAggregatingResultIterator(org.apache.phoenix.iterate.UngroupedAggregatingResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) AggregatingResultIterator(org.apache.phoenix.iterate.AggregatingResultIterator) FilterAggregatingResultIterator(org.apache.phoenix.iterate.FilterAggregatingResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) DistinctAggregatingResultIterator(org.apache.phoenix.iterate.DistinctAggregatingResultIterator) PeekingResultIterator(org.apache.phoenix.iterate.PeekingResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) OrderedAggregatingResultIterator(org.apache.phoenix.iterate.OrderedAggregatingResultIterator) BaseResultIterators(org.apache.phoenix.iterate.BaseResultIterators)
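Example 2 builds the same client-side chain as Example 1 (it only threads server caches and the data plan through to the iterators), so the ordering of the wrappers is worth calling out: HAVING is applied on the client by FilterAggregatingResultIterator, DISTINCT combined with aggregation is deduplicated on the client by DistinctAggregatingResultIterator, and only then are OFFSET and LIMIT (or an ordered spool) applied. The hedged sketch below pairs hypothetical query shapes with the branch they roughly correspond to; which iterators actually appear also depends on salting, local indexes, and whether the GROUP BY preserves row-key order.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class AggregateChainSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Ungrouped aggregate: groupBy.isEmpty(), so per-scan results are simply
            // concatenated (ConcatResultIterator -> UngroupedAggregatingResultIterator).
            printPlan(stmt, "SELECT COUNT(*) FROM METRICS");
            // GROUP BY with HAVING: the HAVING predicate runs on the client in
            // FilterAggregatingResultIterator, after the server-side aggregation.
            printPlan(stmt, "SELECT HOST, COUNT(*) FROM METRICS GROUP BY HOST HAVING COUNT(*) > 5");
            // DISTINCT combined with aggregation: duplicate aggregate rows are removed
            // on the client by DistinctAggregatingResultIterator.
            printPlan(stmt, "SELECT DISTINCT COUNT(*) FROM METRICS GROUP BY HOST");
        }
    }

    private static void printPlan(Statement stmt, String sql) throws Exception {
        System.out.println("-- " + sql);
        try (ResultSet rs = stmt.executeQuery("EXPLAIN " + sql)) {
            while (rs.next()) {
                System.out.println("   " + rs.getString(1));
            }
        }
    }
}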

Example 3 with ConcatResultIterator

Use of org.apache.phoenix.iterate.ConcatResultIterator in project phoenix by apache.

From the class ScanPlan, method newIterator:

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    // Set any scan attributes before creating the scanner, as it will be too late afterwards
    scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
    ResultIterator scanner;
    TableRef tableRef = this.getTableRef();
    PTable table = tableRef.getTable();
    boolean isSalted = table.getBucketNum() != null;
    /* If there is no limit or topN, use a parallel iterator so that we get results faster. Otherwise,
         * if a limit is provided, run the query serially.
         */
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
    boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
    /*
         * For queries that order by the row key and cannot scan more than a threshold's
         * worth of data, we only need to initialize scanners corresponding to the
         * first (or last, if reverse) scan per region.
         */
    boolean initFirstScanOnly = (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) && isDataToScanWithinThreshold;
    BaseResultIterators iterators;
    if (isOffsetOnServer) {
        iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan);
    } else if (isSerial) {
        iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan);
    } else {
        iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly);
    }
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    if (isOffsetOnServer) {
        scanner = new ConcatResultIterator(iterators);
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    } else if (isOrdered) {
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
            /*
                 * For salted tables or local indexes, a merge sort is needed if:
                 * 1) the config phoenix.query.force.rowkeyorder is set to true, or
                 * 2) the query has an order by that wants to sort
                 *    the results by the row key (forward or reverse ordering).
                 */
            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
        } else if (useRoundRobinIterator()) {
            /*
                 * For any kind of table, round robin is possible if no
                 * ordering of rows is needed.
                 */
            scanner = new RoundRobinResultIterator(iterators, this);
        } else {
            scanner = new ConcatResultIterator(iterators);
        }
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Also used : ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) MergeSortRowKeyResultIterator(org.apache.phoenix.iterate.MergeSortRowKeyResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) SerialIterators(org.apache.phoenix.iterate.SerialIterators) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) RoundRobinResultIterator(org.apache.phoenix.iterate.RoundRobinResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) ChunkedResultIterator(org.apache.phoenix.iterate.ChunkedResultIterator) BaseResultIterators(org.apache.phoenix.iterate.BaseResultIterators) PTable(org.apache.phoenix.schema.PTable) TableRef(org.apache.phoenix.schema.TableRef)
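The comment about salted tables and local indexes applies because salting prefixes the row key with a bucket byte, so each bucket is scanned separately and the per-bucket streams must be merge-sorted (MergeSortRowKeyResultIterator) when the client expects row-key order. Below is a hedged sketch of a salted table and a row-key-ordered query; the table definition and connection URL are hypothetical, SALT_BUCKETS is the standard Phoenix table option, and the branch taken may still vary with configuration and statistics.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SaltedScanSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Salted table: rows are spread across 8 buckets by a leading salt byte,
            // so no single scan returns rows in overall row-key order.
            stmt.execute("CREATE TABLE IF NOT EXISTS EVENTS ("
                    + " EVENT_ID BIGINT NOT NULL PRIMARY KEY,"
                    + " PAYLOAD VARCHAR) SALT_BUCKETS = 8");
            // A query that wants rows back in row-key order makes the client merge the
            // per-bucket scans, which typically corresponds to the
            // MergeSortRowKeyResultIterator branch above.
            try (ResultSet rs = stmt.executeQuery(
                    "EXPLAIN SELECT * FROM EVENTS ORDER BY EVENT_ID LIMIT 20")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}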

Example 4 with ConcatResultIterator

Use of org.apache.phoenix.iterate.ConcatResultIterator in project phoenix by apache.

From the class UnionPlan, method iterator:

@Override
public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    this.iterators = new UnionResultIterators(plans, parentContext);
    ResultIterator scanner;
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    if (isOrdered) {
        // TopN
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        scanner = new ConcatResultIterator(iterators);
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    return scanner;
}
Also used : OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) UnionResultIterators(org.apache.phoenix.iterate.UnionResultIterators) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator)
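UnionPlan feeds all sub-plan results through UnionResultIterators and then either merges them as a TopN (when there is an ORDER BY) or simply concatenates them and applies OFFSET and LIMIT on the client. The hedged sketch below shows the two query shapes; the table and column names are hypothetical and only illustrate the branch each shape roughly corresponds to.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class UnionPlanSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // ORDER BY + LIMIT over a union: the branches are merged with
            // MergeSortTopNResultIterator (the TopN path).
            explain(stmt, "SELECT HOST, CREATED_DATE FROM METRICS_2017"
                    + " UNION ALL SELECT HOST, CREATED_DATE FROM METRICS_2018"
                    + " ORDER BY CREATED_DATE DESC LIMIT 10");
            // No ORDER BY: the branches are concatenated (ConcatResultIterator) and
            // OFFSET/LIMIT are applied on the client afterwards.
            explain(stmt, "SELECT HOST FROM METRICS_2017"
                    + " UNION ALL SELECT HOST FROM METRICS_2018 LIMIT 100 OFFSET 50");
        }
    }

    private static void explain(Statement stmt, String sql) throws Exception {
        System.out.println("-- " + sql);
        try (ResultSet rs = stmt.executeQuery("EXPLAIN " + sql)) {
            while (rs.next()) {
                System.out.println("   " + rs.getString(1));
            }
        }
    }
}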

Example 5 with ConcatResultIterator

Use of org.apache.phoenix.iterate.ConcatResultIterator in project phoenix by apache.

From the class ScanPlan, method newIterator:

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, Map<ImmutableBytesPtr, ServerCache> caches) throws SQLException {
    // Set any scan attributes before creating the scanner, as it will be too late afterwards
    scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
    ResultIterator scanner;
    TableRef tableRef = this.getTableRef();
    PTable table = tableRef.getTable();
    boolean isSalted = table.getBucketNum() != null;
    /* If there is no limit or topN, use a parallel iterator so that we get results faster. Otherwise,
         * if a limit is provided, run the query serially.
         */
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
    boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
    /*
         * For queries that order by the row key and cannot scan more than a threshold's
         * worth of data, we only need to initialize scanners corresponding to the
         * first (or last, if reverse) scan per region.
         */
    boolean initFirstScanOnly = (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) && isDataToScanWithinThreshold;
    BaseResultIterators iterators;
    if (isOffsetOnServer) {
        iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan, caches, dataPlan);
    } else if (isSerial) {
        iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan, caches, dataPlan);
    } else {
        iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly, caches, dataPlan);
    }
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    estimateInfoTimestamp = iterators.getEstimateInfoTimestamp();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    if (isOffsetOnServer) {
        scanner = new ConcatResultIterator(iterators);
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    } else if (isOrdered) {
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
            /*
                 * For salted tables or local indexes, a merge sort is needed if:
                 * 1) the config phoenix.query.force.rowkeyorder is set to true, or
                 * 2) the query has an order by that wants to sort
                 *    the results by the row key (forward or reverse ordering).
                 */
            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
        } else if (useRoundRobinIterator()) {
            /*
                 * For any kind of table, round robin is possible if no
                 * ordering of rows is needed.
                 */
            scanner = new RoundRobinResultIterator(iterators, this);
        } else {
            scanner = new ConcatResultIterator(iterators);
        }
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Also used : ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) MergeSortRowKeyResultIterator(org.apache.phoenix.iterate.MergeSortRowKeyResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) SerialIterators(org.apache.phoenix.iterate.SerialIterators) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) RoundRobinResultIterator(org.apache.phoenix.iterate.RoundRobinResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) ChunkedResultIterator(org.apache.phoenix.iterate.ChunkedResultIterator) BaseResultIterators(org.apache.phoenix.iterate.BaseResultIterators) PTable(org.apache.phoenix.schema.PTable) TableRef(org.apache.phoenix.schema.TableRef)
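Example 5 takes the same branches as Example 3; the round-robin path is only reachable when no row ordering is required, and the comment notes that phoenix.query.force.rowkeyorder can force the merge-sort path instead. The hedged sketch below passes that property as a client connection property, which Phoenix generally accepts as a configuration override; treat the property placement (connection Properties versus hbase-site.xml) as an assumption, and the EVENTS table as hypothetical.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class RowKeyOrderSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Ask Phoenix to return rows in row-key order even when the query itself does not
        // require it; unordered scans then take the merge-sort path rather than round robin.
        props.setProperty("phoenix.query.force.rowkeyorder", "true");
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
             Statement stmt = conn.createStatement()) {
            // A filter-only query with no ORDER BY: without the property above it could be
            // served by RoundRobinResultIterator (or a plain ConcatResultIterator).
            try (ResultSet rs = stmt.executeQuery(
                    "EXPLAIN SELECT * FROM EVENTS WHERE PAYLOAD IS NOT NULL")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}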

Aggregations

ConcatResultIterator (org.apache.phoenix.iterate.ConcatResultIterator): 5
LimitingResultIterator (org.apache.phoenix.iterate.LimitingResultIterator): 5
OffsetResultIterator (org.apache.phoenix.iterate.OffsetResultIterator): 5
ResultIterator (org.apache.phoenix.iterate.ResultIterator): 5
BaseResultIterators (org.apache.phoenix.iterate.BaseResultIterators): 4
MergeSortRowKeyResultIterator (org.apache.phoenix.iterate.MergeSortRowKeyResultIterator): 4
ParallelIterators (org.apache.phoenix.iterate.ParallelIterators): 4
SequenceResultIterator (org.apache.phoenix.iterate.SequenceResultIterator): 4
SerialIterators (org.apache.phoenix.iterate.SerialIterators): 4
SpoolingResultIterator (org.apache.phoenix.iterate.SpoolingResultIterator): 4
MergeSortTopNResultIterator (org.apache.phoenix.iterate.MergeSortTopNResultIterator): 3
AggregatingResultIterator (org.apache.phoenix.iterate.AggregatingResultIterator): 2
ChunkedResultIterator (org.apache.phoenix.iterate.ChunkedResultIterator): 2
DistinctAggregatingResultIterator (org.apache.phoenix.iterate.DistinctAggregatingResultIterator): 2
FilterAggregatingResultIterator (org.apache.phoenix.iterate.FilterAggregatingResultIterator): 2
GroupedAggregatingResultIterator (org.apache.phoenix.iterate.GroupedAggregatingResultIterator): 2
OrderedAggregatingResultIterator (org.apache.phoenix.iterate.OrderedAggregatingResultIterator): 2
OrderedResultIterator (org.apache.phoenix.iterate.OrderedResultIterator): 2
PeekingResultIterator (org.apache.phoenix.iterate.PeekingResultIterator): 2
RoundRobinResultIterator (org.apache.phoenix.iterate.RoundRobinResultIterator): 2