Use of org.apache.phoenix.iterate.DelegateResultIterator in project phoenix by apache.
The class BaseQueryPlan, method iterator.
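The iterator method below prepares the Scan (time range, tenant ID, hints, local-index attributes) and, when the caller passes dependencies, wraps the resulting ResultIterator in an anonymous DelegateResultIterator whose close() also closes those dependencies.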
public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
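// If the caller did not supply a scan, fall back to the one prepared during compilation.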
if (scan == null) {
scan = context.getScan();
}
/*
* For aggregate queries, we still need to let the AggregationPlan
* proceed so that we can return proper aggregates even if there are no
* rows to be scanned.
*/
if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
return ResultIterator.EMPTY_ITERATOR;
}
if (tableRef == TableRef.EMPTY_TABLE_REF) {
return newIterator(scanGrouper, scan);
}
// Set miscellaneous scan attributes. This is the last chance to set them before we
// clone the scan for each parallelized chunk.
TableRef tableRef = context.getCurrentTable();
PTable table = tableRef.getTable();
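// A dynamic filter supplied at execution time (e.g. by a join plan) is compiled into the scan's filter here.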
if (dynamicFilter != null) {
WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
}
if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
ScanUtil.setReversed(scan);
// Hack for working around PHOENIX-3121 and HBASE-16296.
// TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
int scannerCacheSize = context.getStatement().getFetchSize();
if (limit != null && limit % scannerCacheSize == 0) {
scan.setCaching(scannerCacheSize + 1);
}
}
if (statement.getHint().hasHint(Hint.SMALL)) {
scan.setSmall(true);
}
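// The SMALL hint marks the scan as an HBase small scan, which reads via pread and avoids extra scanner RPCs for queries expected to return little data.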
PhoenixConnection connection = context.getConnection();
// set read consistency
if (table.getType() != PTableType.SYSTEM) {
scan.setConsistency(connection.getConsistency());
}
// TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
if (!table.isTransactional()) {
// Get the time range of row_timestamp column
TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
// Get the already existing time range on the scan.
TimeRange scanTimeRange = scan.getTimeRange();
Long scn = connection.getSCN();
if (scn == null) {
// If we haven't resolved the time at the beginning of compilation, don't
// force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
scn = tableRef.getTimeStamp();
if (scn == QueryConstants.UNSET_TIMESTAMP) {
scn = HConstants.LATEST_TIMESTAMP;
}
}
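// Intersect the ROW_TIMESTAMP range with the scan's existing time range; an empty intersection means no rows can match.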
try {
TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
if (timeRangeToUse == null) {
return ResultIterator.EMPTY_ITERATOR;
}
scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
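// Encode the tenant ID (if any) and attach it to the scan; for multi-tenant tables it is converted according to the table's row key schema.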
byte[] tenantIdBytes;
if (table.isMultiTenant()) {
tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
} else {
tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
}
ScanUtil.setTenantId(scan, tenantIdBytes);
String customAnnotations = LogUtil.customAnnotationsToString(connection);
ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
// Set local index related scan attributes.
if (table.getIndexType() == IndexType.LOCAL) {
ScanUtil.setLocalIndex(scan);
Set<PColumn> dataColumns = context.getDataColumns();
// If there are data columns to project that are not present in the index, we need to join back to the data table.
if (!dataColumns.isEmpty()) {
// Resolve the parent data table whose columns will be joined back.
PTable parentTable = context.getCurrentTable().getTable();
String parentSchemaName = parentTable.getParentSchemaName().getString();
String parentTableName = parentTable.getParentTableName().getString();
final ParseNodeFactory FACTORY = new ParseNodeFactory();
// TODO: is it necessary to re-resolve the table?
TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
PTable dataTable = dataTableRef.getTable();
// Set data columns to be joined back from the data table.
serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
// Set key value schema of the data columns.
serializeSchemaIntoScan(scan, schema);
// Set index maintainer of the local index.
serializeIndexMaintainerIntoScan(scan, dataTable);
// Set view constants if exists.
serializeViewConstantsIntoScan(scan, dataTable);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
}
ResultIterator iterator = newIterator(scanGrouper, scan);
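// If the plan has dependencies (resources the iterator relies on), wrap it so that closing the iterator also closes those dependencies.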
iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {
@Override
public void close() throws SQLException {
try {
super.close();
} finally {
SQLCloseables.closeAll(dependencies);
}
}
};
if (LOG.isDebugEnabled()) {
LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
}
// wrap the iterator so we start/end tracing as we expect
TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
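The same wrapping pattern can be used from calling code. A minimal sketch, assuming an already compiled QueryPlan named plan; the name plan and the placeholder comments are illustrative, not part of the source above:
ResultIterator delegate = plan.iterator();
ResultIterator iterator = new DelegateResultIterator(delegate) {
@Override
public void close() throws SQLException {
try {
super.close();
} finally {
// release any caller-managed resources here (placeholder)
}
}
};
try {
for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
// consume the tuple
}
} finally {
iterator.close();
}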