Example 1 with ParseNodeFactory

Use of org.apache.phoenix.parse.ParseNodeFactory in project phoenix by apache.

From class BaseQueryPlan, method iterator:

public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return proper aggregates even if there are
     * no rows to be scanned.
     */
    if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    if (statement.getHint().hasHint(Hint.SMALL)) {
        scan.setSmall(true);
    }
    PhoenixConnection connection = context.getConnection();
    // set read consistency
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of row_timestamp column
        TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
        // Get the already existing time range on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // If we haven't resolved the time at the beginning of compilation, don't
            // force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
            scn = tableRef.getTimeStamp();
            if (scn == QueryConstants.UNSET_TIMESTAMP) {
                scn = HConstants.LATEST_TIMESTAMP;
            }
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes. 
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If there are data columns to join back from the data table, set the attributes
        // needed to project them and to build the data row key from the index row key.
        if (!dataColumns.isEmpty()) {
            // Set data columns to be join back from data table.
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory FACTORY = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Set data columns to be join back from data table.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if exists.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan);
    iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {

        @Override
        public void close() throws SQLException {
            try {
                super.close();
            } finally {
                SQLCloseables.closeAll(dependencies);
            }
        }
    };
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // wrap the iterator so we start/end tracing as we expect
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) TracingIterator(org.apache.phoenix.trace.TracingIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) DelegateResultIterator(org.apache.phoenix.iterate.DelegateResultIterator) TraceScope(org.apache.htrace.TraceScope) IOException(java.io.IOException) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) PColumn(org.apache.phoenix.schema.PColumn) TimeRange(org.apache.hadoop.hbase.io.TimeRange) KeyValueSchema(org.apache.phoenix.schema.KeyValueSchema) TableRef(org.apache.phoenix.schema.TableRef) ParseNodeFactory(org.apache.phoenix.parse.ParseNodeFactory)
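
The local-index branch in this example re-resolves the parent data table by building a parse node on the fly. Below is a minimal sketch of just that resolution idiom, using only the Phoenix calls shown above; the helper class and method names are illustrative, not part of Phoenix:

import java.sql.SQLException;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.NamedTableNode;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.TableName;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;

public class ResolveTableSketch {

    // Build a NamedTableNode with ParseNodeFactory and resolve it back to a
    // PTable, mirroring the data-table re-resolution in BaseQueryPlan above.
    static PTable resolveTable(PhoenixConnection conn, String schemaName, String tableName)
            throws SQLException {
        ParseNodeFactory factory = new ParseNodeFactory();
        NamedTableNode tableNode = factory.namedTable(null, TableName.create(schemaName, tableName));
        ColumnResolver resolver = FromCompiler.getResolver(tableNode, conn);
        TableRef tableRef = resolver.resolveTable(schemaName, tableName);
        return tableRef.getTable();
    }
}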

Example 2 with ParseNodeFactory

Use of org.apache.phoenix.parse.ParseNodeFactory in project phoenix by apache.

From class TraceQueryPlan, method iterator:

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    final PhoenixConnection conn = stmt.getConnection();
    if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    return new ResultIterator() {

        @Override
        public void close() throws SQLException {
        }

        @Override
        public Tuple next() throws SQLException {
            if (!first)
                return null;
            TraceScope traceScope = conn.getTraceScope();
            if (traceStatement.isTraceOn()) {
                conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
                if (conn.getSampler() == Sampler.NEVER) {
                    closeTraceScope(conn);
                }
                if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
                    traceScope = Tracing.startNewSpan(conn, "Enabling trace");
                    if (traceScope.getSpan() != null) {
                        conn.setTraceScope(traceScope);
                    } else {
                        closeTraceScope(conn);
                    }
                }
            } else {
                closeTraceScope(conn);
                conn.setSampler(Sampler.NEVER);
            }
            if (traceScope == null || traceScope.getSpan() == null)
                return null;
            first = false;
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            ParseNodeFactory factory = new ParseNodeFactory();
            LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId());
            LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
            expression.evaluate(null, ptr);
            byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
            Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
            List<Cell> cells = new ArrayList<Cell>(1);
            cells.add(cell);
            return new ResultTuple(Result.create(cells));
        }

        private void closeTraceScope(final PhoenixConnection conn) {
            if (conn.getTraceScope() != null) {
                conn.getTraceScope().close();
                conn.setTraceScope(null);
            }
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ResultTuple(org.apache.phoenix.schema.tuple.ResultTuple) ResultIterator(org.apache.phoenix.iterate.ResultIterator) TraceScope(org.apache.htrace.TraceScope) ArrayList(java.util.ArrayList) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) ParseNodeFactory(org.apache.phoenix.parse.ParseNodeFactory)
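
The core of this example is an idiom that recurs in Phoenix's client-side plans (ListJarsQueryPlan below uses the same steps with a string value): turn a literal into a one-row, one-cell Tuple. Below is a minimal sketch of that idiom factored into a helper, using only the calls shown above; the class and method names are illustrative:

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.Determinism;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.parse.LiteralParseNode;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.util.ByteUtil;

public class SingleValueTupleSketch {

    // Wrap a single long (e.g. a trace id) in a one-cell ResultTuple,
    // following the same steps as TraceQueryPlan.next() above.
    static Tuple singleLongTuple(long value) throws SQLException {
        ParseNodeFactory factory = new ParseNodeFactory();
        LiteralParseNode literal = factory.literal(value);
        LiteralExpression expression =
                LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
        // Evaluate the constant into a byte pointer and use those bytes as the row key.
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        expression.evaluate(null, ptr);
        byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
        // Build a bare cell carrying only the row key; family, qualifier, and value stay empty.
        Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
                System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
        List<Cell> cells = new ArrayList<Cell>(1);
        cells.add(cell);
        return new ResultTuple(Result.create(cells));
    }
}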

Example 3 with ParseNodeFactory

Use of org.apache.phoenix.parse.ParseNodeFactory in project phoenix by apache.

From class ListJarsQueryPlan, method iterator:

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    return new ResultIterator() {

        private RemoteIterator<LocatedFileStatus> listFiles = null;

        @Override
        public void close() throws SQLException {
        }

        @Override
        public Tuple next() throws SQLException {
            try {
                if (first) {
                    String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps().get(QueryServices.DYNAMIC_JARS_DIR_KEY);
                    if (dynamicJarsDir == null) {
                        throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY + " is not configured for listing the jars.");
                    }
                    dynamicJarsDir = dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/';
                    Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
                    Path dynamicJarsDirPath = new Path(dynamicJarsDir);
                    FileSystem fs = dynamicJarsDirPath.getFileSystem(conf);
                    listFiles = fs.listFiles(dynamicJarsDirPath, true);
                    first = false;
                }
                if (listFiles == null || !listFiles.hasNext())
                    return null;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ParseNodeFactory factory = new ParseNodeFactory();
                LiteralParseNode literal = factory.literal(listFiles.next().getPath().toString());
                LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PVarchar.INSTANCE, Determinism.ALWAYS);
                expression.evaluate(null, ptr);
                byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
                Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
                List<Cell> cells = new ArrayList<Cell>(1);
                cells.add(cell);
                return new ResultTuple(Result.create(cells));
            } catch (IOException e) {
                throw new SQLException(e);
            }
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
}
Also used: Path(org.apache.hadoop.fs.Path) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) Configuration(org.apache.hadoop.conf.Configuration) SQLException(java.sql.SQLException) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ResultTuple(org.apache.phoenix.schema.tuple.ResultTuple) ResultIterator(org.apache.phoenix.iterate.ResultIterator) ArrayList(java.util.ArrayList) IOException(java.io.IOException) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) RemoteIterator(org.apache.hadoop.fs.RemoteIterator) FileSystem(org.apache.hadoop.fs.FileSystem) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) ParseNodeFactory(org.apache.phoenix.parse.ParseNodeFactory)
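
The first pass through next() above does a recursive HDFS listing before any ParseNodeFactory work. Below is a standalone sketch of just that listing step; the hard-coded directory is a hypothetical stand-in for the DYNAMIC_JARS_DIR_KEY lookup, and a plain Configuration replaces Phoenix's HBaseFactoryProvider:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListJarsSketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical jars directory; the real plan reads it from the QueryServices config.
        String dynamicJarsDir = "/hbase/lib";
        // Normalize to a trailing slash, as the example does.
        dynamicJarsDir = dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + "/";
        Configuration conf = new Configuration();
        Path dir = new Path(dynamicJarsDir);
        FileSystem fs = dir.getFileSystem(conf);
        // The second argument 'true' makes the listing recursive.
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(dir, true);
        while (files.hasNext()) {
            System.out.println(files.next().getPath());
        }
    }
}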

Aggregations

ResultIterator (org.apache.phoenix.iterate.ResultIterator): 3 uses
ParseNodeFactory (org.apache.phoenix.parse.ParseNodeFactory): 3 uses
IOException (java.io.IOException): 2 uses
ArrayList (java.util.ArrayList): 2 uses
List (java.util.List): 2 uses
Cell (org.apache.hadoop.hbase.Cell): 2 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 2 uses
TraceScope (org.apache.htrace.TraceScope): 2 uses
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 2 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 2 uses
LiteralParseNode (org.apache.phoenix.parse.LiteralParseNode): 2 uses
ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple): 2 uses
SQLException (java.sql.SQLException): 1 use
Configuration (org.apache.hadoop.conf.Configuration): 1 use
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
RemoteIterator (org.apache.hadoop.fs.RemoteIterator): 1 use
TimeRange (org.apache.hadoop.hbase.io.TimeRange): 1 use
DelegateResultIterator (org.apache.phoenix.iterate.DelegateResultIterator): 1 use
Hint (org.apache.phoenix.parse.HintNode.Hint): 1 use