Search in sources :

Example 16 with ResultIterator

use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

The class LiteralResultIteratorPlanTest, method testLiteralResultIteratorPlan.

/**
 * Asserts that a literal-result iteration plan built with the given offset and limit
 * produces exactly the rows in {@code expectedResult}, in order, followed by end-of-data.
 *
 * @param expectedResult rows expected from the plan; each inner array holds one value per column
 * @param offset         row offset to apply, or {@code null} for none
 * @param limit          row limit to apply, or {@code null} for none
 * @throws SQLException if iterating the plan fails
 */
private void testLiteralResultIteratorPlan(Object[][] expectedResult, Integer offset, Integer limit) throws SQLException {
    QueryPlan plan = newLiteralResultIterationPlan(offset, limit);
    ResultIterator iterator = plan.iterator();
    ImmutableBytesWritable valuePtr = new ImmutableBytesWritable();
    for (Object[] expectedRow : expectedResult) {
        Tuple actual = iterator.next();
        assertNotNull(actual);
        for (int col = 0; col < expectedRow.length; col++) {
            PColumn pColumn = table.getColumns().get(col);
            ProjectedColumnExpression expr =
                    new ProjectedColumnExpression(pColumn, table, pColumn.getName().getString());
            // evaluate() returning false means the column has no value for this row.
            Object actualValue =
                    expr.evaluate(actual, valuePtr) ? pColumn.getDataType().toObject(valuePtr) : null;
            assertEquals(expectedRow[col], actualValue);
        }
    }
    // The iterator must be exhausted once all expected rows have been consumed.
    assertNull(iterator.next());
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ResultIterator(org.apache.phoenix.iterate.ResultIterator) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) QueryPlan(org.apache.phoenix.compile.QueryPlan) Tuple(org.apache.phoenix.schema.tuple.Tuple) SingleKeyValueTuple(org.apache.phoenix.schema.tuple.SingleKeyValueTuple)

Example 17 with ResultIterator

use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

The class LiteralResultIterationPlan, method newIterator.

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    /*
     * Iterate over the in-memory tuples of the enclosing plan, honoring the plan's
     * offset (rows to skip) and limit (max rows to return) if they are set.
     */
    ResultIterator scanner = new ResultIterator() {

        private final Iterator<Tuple> tupleIterator = tuples.iterator();

        private boolean closed = false;

        // Number of tuples returned so far; compared against limit.
        private int count = 0;

        // Number of tuples skipped so far; compared against offset.
        private int offsetCount = 0;

        @Override
        public void close() throws SQLException {
            // Fixed: removed a stray empty statement (";") that followed this assignment.
            this.closed = true;
        }

        @Override
        public Tuple next() throws SQLException {
            // Skip the first 'offset' tuples when an offset was requested.
            while (!this.closed && (offset != null && offsetCount < offset) && tupleIterator.hasNext()) {
                offsetCount++;
                tupleIterator.next();
            }
            // Check hasNext() before consuming the limit budget so 'count' is not
            // incremented spuriously when the underlying iterator is already exhausted
            // (the original evaluated count++ first due to && short-circuit order).
            if (!this.closed && tupleIterator.hasNext() && (limit == null || count < limit)) {
                count++;
                return tupleIterator.next();
            }
            return null;
        }

        @Override
        public void explain(List<String> planSteps) {
            // Literal result iteration contributes no steps to the explain plan.
        }
    };
    // Wrap with sequence evaluation when the statement references sequences.
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Also used : SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) Iterator(java.util.Iterator) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) List(java.util.List)

Example 18 with ResultIterator

use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

The class ScanPlan, method newIterator.

/**
 * Builds the result iterator for a plain scan, layering decorator iterators
 * (merge-sort, concat, round-robin, offset, limit, sequence) on top of the
 * parallel/serial scanners according to ordering, salting, and offset placement.
 *
 * @param scanGrouper groups parallel scans; passed through to the iterator factories
 * @param scan        the HBase scan to execute; mutated here to set scan attributes
 * @return the fully-wrapped iterator producing the query results
 * @throws SQLException if iterator construction fails
 */
@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    // Set any scan attributes before creating the scanner, as it will be too late afterwards
    scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
    ResultIterator scanner;
    TableRef tableRef = this.getTableRef();
    PTable table = tableRef.getTable();
    boolean isSalted = table.getBucketNum() != null;
    /* If no limit or topN, use parallel iterator so that we get results faster. Otherwise, if
         * limit is provided, run query serially.
         */
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    // Server-side page limit is only usable when there is no ORDER BY and paging is allowed.
    Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
    boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
    /*
         * For queries that are doing a row key order by and are not possibly querying more than a
         * threshold worth of data, then we only need to initialize scanners corresponding to the
         * first (or last, if reverse) scan per region.
         */
    boolean initFirstScanOnly = (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) && isDataToScanWithinThreshold;
    BaseResultIterators iterators;
    // Server-side offset forces serial execution so skipped rows are counted correctly.
    if (isOffsetOnServer) {
        iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan);
    } else if (isSerial) {
        iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan);
    } else {
        iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly);
    }
    // Capture planning statistics/splits before wrapping the iterators.
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    if (isOffsetOnServer) {
        // Offset already applied server-side; only the limit remains to enforce here.
        scanner = new ConcatResultIterator(iterators);
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    } else if (isOrdered) {
        // TopN: merge-sort handles ordering, offset, and limit in one pass.
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
            /*
                 * For salted tables or local index, a merge sort is needed if: 
                 * 1) The config phoenix.query.force.rowkeyorder is set to true 
                 * 2) Or if the query has an order by that wants to sort
                 * the results by the row key (forward or reverse ordering)
                 */
            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
        } else if (useRoundRobinIterator()) {
            /*
                 * For any kind of tables, round robin is possible if there is
                 * no ordering of rows needed.
                 */
            scanner = new RoundRobinResultIterator(iterators, this);
        } else {
            scanner = new ConcatResultIterator(iterators);
        }
        // Client-side offset/limit wrap the chosen base iterator, in that order.
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    // Outermost wrapper: evaluate sequences referenced by the statement, if any.
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Also used : ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) MergeSortRowKeyResultIterator(org.apache.phoenix.iterate.MergeSortRowKeyResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) SerialIterators(org.apache.phoenix.iterate.SerialIterators) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) MergeSortRowKeyResultIterator(org.apache.phoenix.iterate.MergeSortRowKeyResultIterator) SequenceResultIterator(org.apache.phoenix.iterate.SequenceResultIterator) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) RoundRobinResultIterator(org.apache.phoenix.iterate.RoundRobinResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) ChunkedResultIterator(org.apache.phoenix.iterate.ChunkedResultIterator) BaseResultIterators(org.apache.phoenix.iterate.BaseResultIterators) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) RoundRobinResultIterator(org.apache.phoenix.iterate.RoundRobinResultIterator) PTable(org.apache.phoenix.schema.PTable) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) TableRef(org.apache.phoenix.schema.TableRef)

Example 19 with ResultIterator

use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

The class CorrelatePlan, method iterator.

/**
 * Returns an iterator implementing a correlated nested-loop join: for each LHS
 * tuple from the delegate plan, the correlate variable is bound and the RHS plan
 * is re-evaluated; matching pairs are merged into a projected joined tuple.
 *
 * @param scanGrouper scan grouper forwarded to the delegate (LHS) plan
 * @param scan        scan forwarded to the delegate (LHS) plan
 * @return iterator over the joined results
 * @throws SQLException if the delegate iterator cannot be created
 */
@Override
public ResultIterator iterator(final ParallelScanGrouper scanGrouper, final Scan scan) throws SQLException {
    return new ResultIterator() {

        private final ValueBitSet destBitSet = ValueBitSet.newInstance(joinedSchema);

        private final ValueBitSet lhsBitSet = ValueBitSet.newInstance(lhsSchema);

        // Semi/Anti joins emit only LHS columns, so no RHS bit set is needed.
        private final ValueBitSet rhsBitSet = (joinType == JoinType.Semi || joinType == JoinType.Anti) ? ValueBitSet.EMPTY_VALUE_BITSET : ValueBitSet.newInstance(rhsSchema);

        // LHS (outer) iterator, opened once for the lifetime of this iterator.
        private final ResultIterator iter = delegate.iterator(scanGrouper, scan);

        // RHS (inner) iterator for the current LHS tuple; null when no RHS is open.
        private ResultIterator rhsIter = null;

        // Current LHS tuple being correlated against the RHS.
        private Tuple current = null;

        private boolean closed = false;

        @Override
        public void close() throws SQLException {
            // Idempotent: closes the LHS iterator and any open RHS iterator.
            if (!closed) {
                closed = true;
                iter.close();
                if (rhsIter != null) {
                    rhsIter.close();
                }
            }
        }

        @Override
        public Tuple next() throws SQLException {
            if (closed)
                return null;
            Tuple rhsCurrent = null;
            // Continue consuming the RHS for the current LHS tuple, if one is open.
            if (rhsIter != null) {
                rhsCurrent = rhsIter.next();
                if (rhsCurrent == null) {
                    rhsIter.close();
                    rhsIter = null;
                } else if (isSingleValueOnly) {
                    // A scalar subquery must not produce more than one row.
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException();
                }
            }
            // Advance the LHS until we find a tuple whose RHS evaluation should emit a row.
            while (rhsIter == null) {
                current = iter.next();
                if (current == null) {
                    close();
                    return null;
                }
                // Bind the correlate variable so the RHS plan sees the current LHS tuple.
                runtimeContext.setCorrelateVariableValue(variableId, current);
                rhsIter = rhs.iterator();
                rhsCurrent = rhsIter.next();
                // Inner/Semi with empty RHS, or Anti with non-empty RHS: skip this LHS tuple.
                if ((rhsCurrent == null && (joinType == JoinType.Inner || joinType == JoinType.Semi)) || (rhsCurrent != null && joinType == JoinType.Anti)) {
                    rhsIter.close();
                    rhsIter = null;
                }
            }
            Tuple joined;
            try {
                // Semi/Anti (empty rhsBitSet) emit the LHS tuple as-is; otherwise merge LHS + RHS values.
                joined = rhsBitSet == ValueBitSet.EMPTY_VALUE_BITSET ? current : TupleProjector.mergeProjectedValue(convertLhs(current), joinedSchema, destBitSet, rhsCurrent, rhsSchema, rhsBitSet, rhsFieldPosition, true);
            } catch (IOException e) {
                throw new SQLException(e);
            }
            // Semi joins emit at most one row per LHS tuple; also close when RHS returned no row.
            if ((joinType == JoinType.Semi || rhsCurrent == null) && rhsIter != null) {
                rhsIter.close();
                rhsIter = null;
            }
            return joined;
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        // Ensures the LHS tuple is in projected-value form so it can be merged with the RHS.
        private ProjectedValueTuple convertLhs(Tuple lhs) throws IOException {
            ProjectedValueTuple t;
            if (lhs instanceof ProjectedValueTuple) {
                t = (ProjectedValueTuple) lhs;
            } else {
                ImmutableBytesWritable ptr = getContext().getTempPtr();
                TupleProjector.decodeProjectedValue(lhs, ptr);
                lhsBitSet.clear();
                lhsBitSet.or(ptr);
                int bitSetLen = lhsBitSet.getEstimatedLength();
                t = new ProjectedValueTuple(lhs, lhs.getValue(0).getTimestamp(), ptr.get(), ptr.getOffset(), ptr.getLength(), bitSetLen);
            }
            return t;
        }
    };
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ValueBitSet(org.apache.phoenix.schema.ValueBitSet) SQLException(java.sql.SQLException) ResultIterator(org.apache.phoenix.iterate.ResultIterator) List(java.util.List) ProjectedValueTuple(org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple) IOException(java.io.IOException) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Tuple(org.apache.phoenix.schema.tuple.Tuple) ProjectedValueTuple(org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple)

Example 20 with ResultIterator

use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

The class UnionPlan, method iterator.

/**
 * Creates the iterator over the union of all child plans. With an ORDER BY the
 * children are merge-sorted (TopN, applying limit/offset inline); otherwise they
 * are concatenated and offset/limit wrappers are layered on as needed.
 *
 * @param scanGrouper unused here; child plan iterators are built via UnionResultIterators
 * @param scan        unused here; child plan iterators are built via UnionResultIterators
 * @return iterator producing the unioned results
 * @throws SQLException if the child iterators cannot be created
 */
@Override
public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    this.iterators = new UnionResultIterators(plans, parentContext);
    final boolean hasOrderBy = !orderBy.getOrderByExpressions().isEmpty();
    if (hasOrderBy) {
        // TopN: the merge sort applies ordering, offset, and limit in one pass.
        return new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    }
    ResultIterator result = new ConcatResultIterator(iterators);
    if (offset != null) {
        result = new OffsetResultIterator(result, offset);
    }
    if (limit != null) {
        result = new LimitingResultIterator(result, limit);
    }
    return result;
}
Also used : OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) UnionResultIterators(org.apache.phoenix.iterate.UnionResultIterators) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) OffsetResultIterator(org.apache.phoenix.iterate.OffsetResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) MergeSortTopNResultIterator(org.apache.phoenix.iterate.MergeSortTopNResultIterator) LimitingResultIterator(org.apache.phoenix.iterate.LimitingResultIterator) ConcatResultIterator(org.apache.phoenix.iterate.ConcatResultIterator)

Aggregations

ResultIterator (org.apache.phoenix.iterate.ResultIterator)26 SequenceResultIterator (org.apache.phoenix.iterate.SequenceResultIterator)9 List (java.util.List)8 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)8 LimitingResultIterator (org.apache.phoenix.iterate.LimitingResultIterator)8 OffsetResultIterator (org.apache.phoenix.iterate.OffsetResultIterator)8 SQLException (java.sql.SQLException)7 ConcatResultIterator (org.apache.phoenix.iterate.ConcatResultIterator)7 PColumn (org.apache.phoenix.schema.PColumn)7 TableRef (org.apache.phoenix.schema.TableRef)7 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)6 LiteralExpression (org.apache.phoenix.expression.LiteralExpression)5 DelegateResultIterator (org.apache.phoenix.iterate.DelegateResultIterator)5 ParallelIterators (org.apache.phoenix.iterate.ParallelIterators)5 SpoolingResultIterator (org.apache.phoenix.iterate.SpoolingResultIterator)5 PTable (org.apache.phoenix.schema.PTable)5 Tuple (org.apache.phoenix.schema.tuple.Tuple)5 ArrayList (java.util.ArrayList)4 QueryPlan (org.apache.phoenix.compile.QueryPlan)4 StatementContext (org.apache.phoenix.compile.StatementContext)4