Example usage of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project, from the method testLiteralResultIteratorPlan of the class LiteralResultIteratorPlanTest:
/**
 * Executes a literal-result-iteration plan built with the given offset and limit,
 * and verifies that the rows it yields match {@code expectedResult} exactly,
 * cell by cell, and that the iterator is exhausted afterwards.
 */
private void testLiteralResultIteratorPlan(Object[][] expectedResult, Integer offset, Integer limit) throws SQLException {
    final QueryPlan plan = newLiteralResultIterationPlan(offset, limit);
    final ResultIterator results = plan.iterator();
    final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable();
    for (Object[] expectedRow : expectedResult) {
        final Tuple actual = results.next();
        assertNotNull(actual);
        int col = 0;
        for (Object expectedCell : expectedRow) {
            final PColumn column = table.getColumns().get(col++);
            final ProjectedColumnExpression expr =
                    new ProjectedColumnExpression(column, table, column.getName().getString());
            Object actualCell = null;
            if (expr.evaluate(actual, valuePtr)) {
                actualCell = column.getDataType().toObject(valuePtr);
            }
            assertEquals(expectedCell, actualCell);
        }
    }
    // All expected rows consumed: the iterator must now report end-of-data.
    assertNull(results.next());
}
Example usage of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project, from the method newIterator of the class LiteralResultIterationPlan:
/**
 * Returns an iterator over the in-memory {@code tuples} list, lazily skipping the
 * first {@code offset} tuples (when an offset is set) and yielding at most
 * {@code limit} tuples (when a limit is set). Wrapped with a
 * {@link SequenceResultIterator} when the statement uses sequences.
 */
@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    ResultIterator scanner = new ResultIterator() {
        private final Iterator<Tuple> tupleIterator = tuples.iterator();
        private boolean closed = false;
        // Number of tuples already returned to the caller (limit enforcement).
        private int count = 0;
        // Number of tuples skipped so far (offset enforcement).
        private int offsetCount = 0;

        @Override
        public void close() throws SQLException {
            // Removed stray empty statement that followed this assignment.
            this.closed = true;
        }

        @Override
        public Tuple next() throws SQLException {
            if (this.closed) {
                return null;
            }
            // Lazily consume the offset rows on the first call(s) to next().
            while (offset != null && offsetCount < offset && tupleIterator.hasNext()) {
                offsetCount++;
                tupleIterator.next();
            }
            // Count a row against the limit only when one is actually returned;
            // the original incremented count via a side effect in the condition,
            // before hasNext() was even checked.
            if (tupleIterator.hasNext() && (limit == null || count < limit)) {
                count++;
                return tupleIterator.next();
            }
            return null;
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Example usage of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project, from the method newIterator of the class ScanPlan:
/**
 * Assembles the client-side ResultIterator stack for a plain (non-aggregate) scan.
 * Chooses serial vs. parallel execution of the per-region scans, then layers
 * merge-sort / concat / round-robin iterators plus optional offset and limit
 * wrappers, and finally a sequence iterator when sequences are in use.
 *
 * @param scanGrouper groups the parallel scans for execution
 * @param scan the template scan to run against the table
 * @throws SQLException if the underlying iterators cannot be created
 */
@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
// Set any scan attributes before creating the scanner, as it will be too late afterwards
scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
ResultIterator scanner;
TableRef tableRef = this.getTableRef();
PTable table = tableRef.getTable();
boolean isSalted = table.getBucketNum() != null;
/* If no limit or topN, use parallel iterator so that we get results faster. Otherwise, if
 * limit is provided, run query serially.
 */
boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
// Push a per-scan limit to the server only when paging is allowed and no
// client-side ordering is needed (ordering would invalidate a server limit).
Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
/*
 * For queries that are doing a row key order by and are not possibly querying more than a
 * threshold worth of data, then we only need to initialize scanners corresponding to the
 * first (or last, if reverse) scan per region.
 */
boolean initFirstScanOnly = (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) && isDataToScanWithinThreshold;
BaseResultIterators iterators;
if (isOffsetOnServer) {
// Server handles the offset: scans must run serially so skipped rows are deterministic.
iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan);
} else if (isSerial) {
iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan);
} else {
iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly);
}
// Capture planning metadata (estimates, splits, scans) for EXPLAIN and stats.
estimatedRows = iterators.getEstimatedRowCount();
estimatedSize = iterators.getEstimatedByteCount();
splits = iterators.getSplits();
scans = iterators.getScans();
if (isOffsetOnServer) {
// Offset already applied server-side; only the limit remains to enforce here.
scanner = new ConcatResultIterator(iterators);
if (limit != null) {
scanner = new LimitingResultIterator(scanner, limit);
}
} else if (isOrdered) {
// TopN: merge-sort applies ordering, offset and limit in a single pass.
scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
} else {
if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
/*
 * For salted tables or local index, a merge sort is needed if:
 * 1) The config phoenix.query.force.rowkeyorder is set to true
 * 2) Or if the query has an order by that wants to sort
 * the results by the row key (forward or reverse ordering)
 */
scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
} else if (useRoundRobinIterator()) {
/*
 * For any kind of tables, round robin is possible if there is
 * no ordering of rows needed.
 */
scanner = new RoundRobinResultIterator(iterators, this);
} else {
scanner = new ConcatResultIterator(iterators);
}
// Client-side offset must wrap before limit so the limit counts post-offset rows.
if (offset != null) {
scanner = new OffsetResultIterator(scanner, offset);
}
if (limit != null) {
scanner = new LimitingResultIterator(scanner, limit);
}
}
if (context.getSequenceManager().getSequenceCount() > 0) {
scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
}
return scanner;
}
Example usage of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project, from the method iterator of the class CorrelatePlan:
/**
 * Returns an iterator implementing a correlated nested-loop join: for each tuple
 * from the left-hand (delegate) plan, the correlate variable is bound and the
 * right-hand plan is re-executed; matching pairs are merged into a projected
 * tuple according to {@code joinType} (Inner/Semi/Anti and single-value checks
 * are visible below; other types pass through the merge).
 */
@Override
public ResultIterator iterator(final ParallelScanGrouper scanGrouper, final Scan scan) throws SQLException {
return new ResultIterator() {
private final ValueBitSet destBitSet = ValueBitSet.newInstance(joinedSchema);
private final ValueBitSet lhsBitSet = ValueBitSet.newInstance(lhsSchema);
// Semi/Anti joins emit only LHS columns, so no RHS bitset is needed.
private final ValueBitSet rhsBitSet = (joinType == JoinType.Semi || joinType == JoinType.Anti) ? ValueBitSet.EMPTY_VALUE_BITSET : ValueBitSet.newInstance(rhsSchema);
private final ResultIterator iter = delegate.iterator(scanGrouper, scan);
// Iterator over the RHS plan for the current LHS tuple; null when a new LHS tuple is needed.
private ResultIterator rhsIter = null;
// Current LHS tuple being correlated.
private Tuple current = null;
private boolean closed = false;
@Override
public void close() throws SQLException {
if (!closed) {
closed = true;
iter.close();
if (rhsIter != null) {
rhsIter.close();
}
}
}
@Override
public Tuple next() throws SQLException {
if (closed)
return null;
Tuple rhsCurrent = null;
// First, try to advance the RHS iterator for the current LHS tuple.
if (rhsIter != null) {
rhsCurrent = rhsIter.next();
if (rhsCurrent == null) {
// RHS exhausted for this LHS tuple; fall through to fetch the next LHS tuple.
rhsIter.close();
rhsIter = null;
} else if (isSingleValueOnly) {
// A scalar subquery produced a second row — that's an error per SQL semantics.
throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException();
}
}
// Advance the LHS until we find a tuple whose RHS result satisfies the join type.
while (rhsIter == null) {
current = iter.next();
if (current == null) {
close();
return null;
}
// Bind the correlate variable so the RHS plan sees the current LHS tuple.
runtimeContext.setCorrelateVariableValue(variableId, current);
rhsIter = rhs.iterator();
rhsCurrent = rhsIter.next();
// Inner/Semi need at least one RHS match; Anti needs none — otherwise skip this LHS tuple.
if ((rhsCurrent == null && (joinType == JoinType.Inner || joinType == JoinType.Semi)) || (rhsCurrent != null && joinType == JoinType.Anti)) {
rhsIter.close();
rhsIter = null;
}
}
Tuple joined;
try {
// Semi/Anti (empty RHS bitset) emit the LHS tuple as-is; otherwise merge LHS + RHS values.
joined = rhsBitSet == ValueBitSet.EMPTY_VALUE_BITSET ? current : TupleProjector.mergeProjectedValue(convertLhs(current), joinedSchema, destBitSet, rhsCurrent, rhsSchema, rhsBitSet, rhsFieldPosition, true);
} catch (IOException e) {
throw new SQLException(e);
}
// Semi joins emit one row per LHS tuple, so drop the rest of the RHS for this tuple.
if ((joinType == JoinType.Semi || rhsCurrent == null) && rhsIter != null) {
rhsIter.close();
rhsIter = null;
}
return joined;
}
@Override
public void explain(List<String> planSteps) {
}
// Ensures the LHS tuple is in ProjectedValueTuple form so it can be merged with RHS values.
private ProjectedValueTuple convertLhs(Tuple lhs) throws IOException {
ProjectedValueTuple t;
if (lhs instanceof ProjectedValueTuple) {
t = (ProjectedValueTuple) lhs;
} else {
ImmutableBytesWritable ptr = getContext().getTempPtr();
TupleProjector.decodeProjectedValue(lhs, ptr);
lhsBitSet.clear();
lhsBitSet.or(ptr);
int bitSetLen = lhsBitSet.getEstimatedLength();
t = new ProjectedValueTuple(lhs, lhs.getValue(0).getTimestamp(), ptr.get(), ptr.getOffset(), ptr.getLength(), bitSetLen);
}
return t;
}
};
}
Example usage of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project, from the method iterator of the class UnionPlan:
/**
 * Builds the result iterator for this UNION plan: a top-N merge sort when the
 * query is ordered (which applies limit and offset itself), otherwise a plain
 * concatenation of the child iterators wrapped with offset and limit
 * iterators as needed.
 */
@Override
public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    this.iterators = new UnionResultIterators(plans, parentContext);
    if (!orderBy.getOrderByExpressions().isEmpty()) {
        // TopN: the merge-sort iterator handles ordering, offset, and limit in one pass.
        return new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    }
    ResultIterator result = new ConcatResultIterator(iterators);
    if (offset != null) {
        result = new OffsetResultIterator(result, offset);
    }
    if (limit != null) {
        result = new LimitingResultIterator(result, limit);
    }
    return result;
}
Aggregations