Use of org.apache.phoenix.parse.ParseNodeFactory in project phoenix by apache.
In class BaseQueryPlan, method iterator:
public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
if (scan == null) {
scan = context.getScan();
}
/*
* For aggregate queries, we still need to let the AggregationPlan
* proceed so that we can return proper aggregates even if there are
* no rows to be scanned.
*/
if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
return ResultIterator.EMPTY_ITERATOR;
}
if (tableRef == TableRef.EMPTY_TABLE_REF) {
return newIterator(scanGrouper, scan);
}
// Set miscellaneous scan attributes. This is the last chance to set them before we
// clone the scan for each parallelized chunk.
TableRef tableRef = context.getCurrentTable();
PTable table = tableRef.getTable();
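// If a dynamic filter was supplied, compile it into the scan's filter.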
if (dynamicFilter != null) {
WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
}
if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
ScanUtil.setReversed(scan);
// Hack for working around PHOENIX-3121 and HBASE-16296.
// TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
int scannerCacheSize = context.getStatement().getFetchSize();
if (limit != null && limit % scannerCacheSize == 0) {
scan.setCaching(scannerCacheSize + 1);
}
}
if (statement.getHint().hasHint(Hint.SMALL)) {
scan.setSmall(true);
}
PhoenixConnection connection = context.getConnection();
// set read consistency
if (table.getType() != PTableType.SYSTEM) {
scan.setConsistency(connection.getConsistency());
}
// TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
if (!table.isTransactional()) {
// Get the time range of row_timestamp column
TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
// Get the already existing time range on the scan.
TimeRange scanTimeRange = scan.getTimeRange();
Long scn = connection.getSCN();
if (scn == null) {
// If we haven't resolved the time at the beginning of compilation, don't
// force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
scn = tableRef.getTimeStamp();
if (scn == QueryConstants.UNSET_TIMESTAMP) {
scn = HConstants.LATEST_TIMESTAMP;
}
}
try {
TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
if (timeRangeToUse == null) {
return ResultIterator.EMPTY_ITERATOR;
}
scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
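// Serialize the tenant id, if any, into the scan as an attribute.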
byte[] tenantIdBytes;
if (table.isMultiTenant()) {
tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
} else {
tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
}
ScanUtil.setTenantId(scan, tenantIdBytes);
String customAnnotations = LogUtil.customAnnotationsToString(connection);
ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
// Set local index related scan attributes.
if (table.getIndexType() == IndexType.LOCAL) {
ScanUtil.setLocalIndex(scan);
Set<PColumn> dataColumns = context.getDataColumns();
// If any data columns to be projected are not present in the index, they must be
// joined back from the data table.
if (!dataColumns.isEmpty()) {
PTable parentTable = context.getCurrentTable().getTable();
String parentSchemaName = parentTable.getParentSchemaName().getString();
String parentTableName = parentTable.getParentTableName().getString();
final ParseNodeFactory FACTORY = new ParseNodeFactory();
// TODO: is it necessary to re-resolve the table?
TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
PTable dataTable = dataTableRef.getTable();
// Set data columns to be joined back from the data table.
serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
// Set key value schema of the data columns.
serializeSchemaIntoScan(scan, schema);
// Set index maintainer of the local index.
serializeIndexMaintainerIntoScan(scan, dataTable);
// Set view constants if exists.
serializeViewConstantsIntoScan(scan, dataTable);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
}
ResultIterator iterator = newIterator(scanGrouper, scan);
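// If dependencies were passed in, wrap the iterator so they are closed along with it.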
iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {
@Override
public void close() throws SQLException {
try {
super.close();
} finally {
SQLCloseables.closeAll(dependencies);
}
}
};
if (LOG.isDebugEnabled()) {
LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
}
// wrap the iterator so we start/end tracing as we expect
TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
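Note: in the local-index branch above, ParseNodeFactory is used only to build a named-table parse node so the parent data table can be re-resolved. The following stand-alone sketch isolates that pattern; the helper name resolveDataTable and its arguments are illustrative and not part of Phoenix.

private static PTable resolveDataTable(PhoenixConnection conn, String schemaName, String tableName) throws SQLException {
    ParseNodeFactory factory = new ParseNodeFactory();
    // Build a parse node for the schema-qualified table name; null means no alias.
    NamedTableNode tableNode = factory.namedTable(null, TableName.create(schemaName, tableName));
    // Resolve the node against the connection's metadata and return the table's PTable.
    TableRef dataTableRef = FromCompiler.getResolver(tableNode, conn).resolveTable(schemaName, tableName);
    return dataTableRef.getTable();
}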
Use of org.apache.phoenix.parse.ParseNodeFactory in project phoenix by apache.
In class TraceQueryPlan, method iterator:
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
final PhoenixConnection conn = stmt.getConnection();
if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
return ResultIterator.EMPTY_ITERATOR;
}
return new ResultIterator() {
@Override
public void close() throws SQLException {
}
@Override
public Tuple next() throws SQLException {
if (!first)
return null;
TraceScope traceScope = conn.getTraceScope();
if (traceStatement.isTraceOn()) {
conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
if (conn.getSampler() == Sampler.NEVER) {
closeTraceScope(conn);
}
if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
traceScope = Tracing.startNewSpan(conn, "Enabling trace");
if (traceScope.getSpan() != null) {
conn.setTraceScope(traceScope);
} else {
closeTraceScope(conn);
}
}
} else {
closeTraceScope(conn);
conn.setSampler(Sampler.NEVER);
}
if (traceScope == null || traceScope.getSpan() == null)
return null;
first = false;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
ParseNodeFactory factory = new ParseNodeFactory();
LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId());
LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
expression.evaluate(null, ptr);
byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
List<Cell> cells = new ArrayList<Cell>(1);
cells.add(cell);
return new ResultTuple(Result.create(cells));
}
private void closeTraceScope(final PhoenixConnection conn) {
if (conn.getTraceScope() != null) {
conn.getTraceScope().close();
conn.setTraceScope(null);
}
}
@Override
public void explain(List<String> planSteps) {
}
};
}
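Note: TraceQueryPlan above (and ListJarsQueryPlan below) uses ParseNodeFactory only to turn a single literal value into a one-cell Tuple. The sketch below isolates that pattern for a long value such as a trace id; the helper name singleValueTuple is illustrative and not part of Phoenix.

private static Tuple singleValueTuple(long value) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ParseNodeFactory factory = new ParseNodeFactory();
    // Wrap the raw value in a literal parse node, then in a constant expression.
    LiteralParseNode literal = factory.literal(value);
    LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
    // Evaluate the constant to get its serialized bytes, which become the row key.
    expression.evaluate(null, ptr);
    byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
    // The value lives entirely in the row key; family, qualifier and cell value stay empty.
    Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
    return new ResultTuple(Result.create(Collections.singletonList(cell)));
}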
Use of org.apache.phoenix.parse.ParseNodeFactory in project phoenix by apache.
In class ListJarsQueryPlan, method iterator:
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
return new ResultIterator() {
private RemoteIterator<LocatedFileStatus> listFiles = null;
@Override
public void close() throws SQLException {
}
@Override
public Tuple next() throws SQLException {
try {
if (first) {
String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps().get(QueryServices.DYNAMIC_JARS_DIR_KEY);
if (dynamicJarsDir == null) {
throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY + " is not configured for listing the jars.");
}
dynamicJarsDir = dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/';
Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
Path dynamicJarsDirPath = new Path(dynamicJarsDir);
FileSystem fs = dynamicJarsDirPath.getFileSystem(conf);
listFiles = fs.listFiles(dynamicJarsDirPath, true);
first = false;
}
if (listFiles == null || !listFiles.hasNext())
return null;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
ParseNodeFactory factory = new ParseNodeFactory();
LiteralParseNode literal = factory.literal(listFiles.next().getPath().toString());
LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PVarchar.INSTANCE, Determinism.ALWAYS);
expression.evaluate(null, ptr);
byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
List<Cell> cells = new ArrayList<Cell>(1);
cells.add(cell);
return new ResultTuple(Result.create(cells));
} catch (IOException e) {
throw new SQLException(e);
}
}
@Override
public void explain(List<String> planSteps) {
}
};
}
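Note: ListJarsQueryPlan follows the same literal-to-Tuple pattern as TraceQueryPlan, substituting PVarchar.INSTANCE for PLong.INSTANCE because the projected value is a jar path rather than a trace id.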