Example 21 with SelectStatement

Use of org.apache.phoenix.parse.SelectStatement in project phoenix by apache.

From the class CreateTableCompiler, the compile method:

public MutationPlan compile(CreateTableStatement create) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection);
    PTableType type = create.getTableType();
    PTable parentToBe = null;
    ViewType viewTypeToBe = null;
    Scan scan = new Scan();
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    // TODO: support any statement for a VIEW instead of just a WHERE clause
    ParseNode whereNode = create.getWhereClause();
    String viewStatementToBe = null;
    byte[][] viewColumnConstantsToBe = null;
    BitSet isViewColumnReferencedToBe = null;
    // Disallow creating the table if any column family name contains the
    // local index column family prefix.
    // Also validate the default value expressions.
    List<ColumnDef> columnDefs = create.getColumnDefs();
    List<ColumnDef> overideColumnDefs = null;
    PrimaryKeyConstraint pkConstraint = create.getPrimaryKeyConstraint();
    for (int i = 0; i < columnDefs.size(); i++) {
        ColumnDef columnDef = columnDefs.get(i);
        if (columnDef.getColumnDefName().getFamilyName() != null && columnDef.getColumnDefName().getFamilyName().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_COLUMN_FAMILY).build().buildException();
        }
        // False means we do not need the default (because it evaluated to null)
        if (!columnDef.validateDefault(context, pkConstraint)) {
            if (overideColumnDefs == null) {
                overideColumnDefs = new ArrayList<>(columnDefs);
            }
            overideColumnDefs.set(i, new ColumnDef(columnDef, null));
        }
    }
    if (overideColumnDefs != null) {
        create = new CreateTableStatement(create, overideColumnDefs);
    }
    final CreateTableStatement finalCreate = create;
    if (type == PTableType.VIEW) {
        TableRef tableRef = resolver.getTables().get(0);
        int nColumns = tableRef.getTable().getColumns().size();
        isViewColumnReferencedToBe = new BitSet(nColumns);
        // Used to track column references in a view
        ExpressionCompiler expressionCompiler = new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe);
        parentToBe = tableRef.getTable();
        viewTypeToBe = parentToBe.getViewType() == ViewType.MAPPED ? ViewType.MAPPED : ViewType.UPDATABLE;
        if (whereNode == null) {
            viewStatementToBe = parentToBe.getViewStatement();
        } else {
            whereNode = StatementNormalizer.normalize(whereNode, resolver);
            if (whereNode.isStateless()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WHERE_IS_CONSTANT).build().buildException();
            }
            // If our parent has a VIEW statement, combine it with this one
            if (parentToBe.getViewStatement() != null) {
                SelectStatement select = new SQLParser(parentToBe.getViewStatement()).parseQuery().combine(whereNode);
                whereNode = select.getWhere();
            }
            Expression where = whereNode.accept(expressionCompiler);
            if (where != null && !LiteralExpression.isTrue(where)) {
                TableName baseTableName = create.getBaseTableName();
                StringBuilder buf = new StringBuilder();
                whereNode.toSQL(resolver, buf);
                viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(), baseTableName.getTableName(), buf.toString());
            }
            if (viewTypeToBe != ViewType.MAPPED) {
                viewColumnConstantsToBe = new byte[nColumns][];
                ViewWhereExpressionVisitor visitor = new ViewWhereExpressionVisitor(parentToBe, viewColumnConstantsToBe);
                where.accept(visitor);
                // If view is not updatable, viewColumnConstants should be empty. We will still
                // inherit our parent viewConstants, but we have no additional ones.
                viewTypeToBe = visitor.isUpdatable() ? ViewType.UPDATABLE : ViewType.READ_ONLY;
                if (viewTypeToBe != ViewType.UPDATABLE) {
                    viewColumnConstantsToBe = null;
                }
            }
        }
    }
    final ViewType viewType = viewTypeToBe;
    final String viewStatement = viewStatementToBe;
    final byte[][] viewColumnConstants = viewColumnConstantsToBe;
    final BitSet isViewColumnReferenced = isViewColumnReferencedToBe;
    List<ParseNode> splitNodes = create.getSplitNodes();
    final byte[][] splits = new byte[splitNodes.size()][];
    ImmutableBytesWritable ptr = context.getTempPtr();
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    for (int i = 0; i < splits.length; i++) {
        ParseNode node = splitNodes.get(i);
        if (node instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) node, VARBINARY_DATUM);
        }
        if (node.isStateless()) {
            Expression expression = node.accept(expressionCompiler);
            if (expression.evaluate(null, ptr)) {
                splits[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                continue;
            }
        }
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT).setMessage("Node: " + node).build().buildException();
    }
    final MetaDataClient client = new MetaDataClient(connection);
    final PTable parent = parentToBe;
    return new BaseMutationPlan(context, operation) {

        @Override
        public MutationState execute() throws SQLException {
            try {
                return client.createTable(finalCreate, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced);
            } finally {
                if (client.getConnection() != connection) {
                    client.getConnection().close();
                }
            }
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("CREATE TABLE"));
        }
    };
}
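
For orientation, here is a minimal JDBC sketch of DDL that exercises this compile path; the connection URL, table name, and view name are illustrative assumptions, not anything from the Phoenix source above. The SPLIT ON literals go through the stateless-expression check in the loop over splitNodes, and the view's WHERE clause goes through normalization and the UPDATABLE/READ_ONLY analysis.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateTableCompilerSketch {
    public static void main(String[] args) throws Exception {
        // Assumed connection URL; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Each SPLIT ON point must be a stateless expression; the compiler
            // evaluates it up front into a split-key byte array.
            stmt.execute("CREATE TABLE IF NOT EXISTS events ("
                    + " tenant_id VARCHAR NOT NULL,"
                    + " event_id VARCHAR NOT NULL"
                    + " CONSTRAINT pk PRIMARY KEY (tenant_id, event_id))"
                    + " SPLIT ON ('a', 'm', 't')");
            // A view with a non-constant WHERE clause: the compiler normalizes the
            // predicate, merges it with any parent view statement, and marks the
            // view UPDATABLE or READ_ONLY based on the ViewWhereExpressionVisitor.
            stmt.execute("CREATE VIEW IF NOT EXISTS tenant_a_events"
                    + " AS SELECT * FROM events WHERE tenant_id = 'a'");
        }
    }
}

Both statements end up in the MutationPlan returned above, whose execute() delegates to MetaDataClient.createTable.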

Example 22 with SelectStatement

Use of org.apache.phoenix.parse.SelectStatement in project phoenix by apache.

From the class DeleteCompiler, the compile method:

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasPostProcessing = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    DeletingParallelIteratorFactory parallelIteratorFactoryToBe;
    resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
    final TableRef targetTableRef = resolverToBe.getTables().get(0);
    PTable table = targetTableRef.getTable();
    // TODO: SchemaUtil.isReadOnly(PTable, connection)?
    if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(schemaName, tableName);
    } else if (table.isTransactional() && connection.getSCN() != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    }
    List<PTable> clientSideIndexes = getClientSideMaintainedIndexes(targetTableRef);
    final boolean hasClientSideIndexes = !clientSideIndexes.isEmpty();
    boolean isSalted = table.getBucketNum() != null;
    boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int pkColumnOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
    final int pkColumnCount = table.getPKColumns().size() - pkColumnOffset;
    int selectColumnCount = pkColumnCount;
    for (PTable index : clientSideIndexes) {
        selectColumnCount += index.getPKColumns().size() - pkColumnCount;
    }
    Set<PColumn> projectedColumns = new LinkedHashSet<PColumn>(selectColumnCount + pkColumnOffset);
    List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount);
    for (int i = isSalted ? 1 : 0; i < pkColumnOffset; i++) {
        PColumn column = table.getPKColumns().get(i);
        projectedColumns.add(column);
    }
    for (int i = pkColumnOffset; i < table.getPKColumns().size(); i++) {
        PColumn column = table.getPKColumns().get(i);
        projectedColumns.add(column);
        aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
    }
    // Project all non-PK indexed columns so that we can do the proper index maintenance
    for (PTable index : table.getIndexes()) {
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        // Go through maintainer as it handles functional indexes correctly
        for (Pair<String, String> columnInfo : maintainer.getIndexedColumnInfo()) {
            String familyName = columnInfo.getFirst();
            if (familyName != null) {
                String columnName = columnInfo.getSecond();
                boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
                PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
                if (!projectedColumns.contains(column)) {
                    projectedColumns.add(column);
                    aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
                }
            }
        }
    }
    select = FACTORY.select(delete.getTable(), delete.getHint(), false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
    select = StatementNormalizer.normalize(select, resolverToBe);
    SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
    boolean hasPreProcessing = transformedSelect != select;
    if (transformedSelect != select) {
        resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
        select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
    }
    final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
    boolean noQueryReqd = !hasPreOrPostProcessing;
    // Can only run on the server when there is no limit and no subqueries, joins, etc. in the where clause.
    // Can't run on the same server for transactional data, as we need the row keys for the data
    // being deleted for conflict detection purposes.
    // If we have immutable indexes, we'd increase the number of bytes scanned by executing
    // separate queries against each index, so it's better to drive from a single table in that case.
    boolean runOnServer = isAutoCommit && !hasPreOrPostProcessing && !table.isTransactional() && !hasClientSideIndexes;
    HintNode hint = delete.getHint();
    if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
        select = SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE));
    }
    parallelIteratorFactoryToBe = hasPreOrPostProcessing ? null : new DeletingParallelIteratorFactory(connection);
    QueryOptimizer optimizer = new QueryOptimizer(services);
    QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe, new SequenceManager(statement));
    final QueryPlan dataPlan = compiler.compile();
    // TODO: the select clause should know that there's a subquery, but it doesn't seem to currently
    queryPlans = Lists.newArrayList(!clientSideIndexes.isEmpty() ? optimizer.getApplicablePlans(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe) : optimizer.getBestPlan(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe));
    // Filter out any local indexes that don't contain all indexed columns.
    // We have to do this manually because local indexes are still used
    // when referenced columns aren't in the index, so they won't be
    // filtered by the optimizer.
    queryPlans = new ArrayList<>(queryPlans);
    Iterator<QueryPlan> iterator = queryPlans.iterator();
    while (iterator.hasNext()) {
        QueryPlan plan = iterator.next();
        if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
            if (!plan.getContext().getDataColumns().isEmpty()) {
                iterator.remove();
            }
        }
    }
    if (queryPlans.isEmpty()) {
        queryPlans = Collections.singletonList(dataPlan);
    }
    runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
    // We need to have all indexed columns available in all immutable indexes in order
    // to generate the delete markers from the query. We also cannot have any filters
    // except for our SkipScanFilter for point lookups.
    // A simple check for the absence of a where clause in the parse node is not sufficient, as the
    // where clause may have been optimized out. Instead, we check that there's a single SkipScanFilter.
    // If we can generate a plan for every index, that means all the required columns are available in every index,
    // hence we can drive the delete from any of the plans.
    noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
    int queryPlanIndex = 0;
    while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
        QueryPlan plan = queryPlans.get(queryPlanIndex++);
        StatementContext context = plan.getContext();
        noQueryReqd &= (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup();
    }
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    // If we're deleting a set of rows identified purely by point lookups on the primary key,
    // we don't need to run a query at all.
    if (noQueryReqd) {
        // Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
        // from the data table, while the others will be for deleting rows from immutable indexes.
        List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(queryPlans.size());
        for (final QueryPlan plan : queryPlans) {
            mutationPlans.add(new SingleRowDeleteMutationPlan(plan, connection, maxSize, maxSizeBytes));
        }
        return new MultiRowDeleteMutationPlan(dataPlan, mutationPlans);
    } else if (runOnServer) {
        // TODO: better abstraction
        final StatementContext context = dataPlan.getContext();
        Scan scan = context.getScan();
        scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
        // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
        // The coprocessor will delete each row returned from the scan
        // Ignore ORDER BY, since with auto-commit on and no limit it makes no difference
        SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
        RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
        if (dataPlan.getProjector().projectEveryRow()) {
            projectorToBe = new RowProjector(projectorToBe, true);
        }
        final RowProjector projector = projectorToBe;
        final QueryPlan aggPlan = new AggregatePlan(context, select, dataPlan.getTableRef(), projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, dataPlan);
        return new ServerSelectDeleteMutationPlan(dataPlan, connection, aggPlan, projector, maxSize, maxSizeBytes);
    } else {
        final DeletingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
        List<PColumn> adjustedProjectedColumns = Lists.newArrayListWithExpectedSize(projectedColumns.size());
        final int offset = table.getBucketNum() == null ? 0 : 1;
        Iterator<PColumn> projectedColsItr = projectedColumns.iterator();
        int i = 0;
        while (projectedColsItr.hasNext()) {
            final int position = i++;
            adjustedProjectedColumns.add(new DelegateColumn(projectedColsItr.next()) {

                @Override
                public int getPosition() {
                    return position + offset;
                }
            });
        }
        PTable projectedTable = PTableImpl.makePTable(table, PTableType.PROJECTED, adjustedProjectedColumns);
        final TableRef projectedTableRef = new TableRef(projectedTable, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp());
        QueryPlan bestPlanToBe = dataPlan;
        for (QueryPlan plan : queryPlans) {
            PTable planTable = plan.getTableRef().getTable();
            if (planTable.getIndexState() != PIndexState.BUILDING) {
                bestPlanToBe = plan;
                break;
            }
        }
        final QueryPlan bestPlan = bestPlanToBe;
        final List<TableRef> otherTableRefs = Lists.newArrayListWithExpectedSize(clientSideIndexes.size());
        for (PTable index : clientSideIndexes) {
            if (!bestPlan.getTableRef().getTable().equals(index)) {
                otherTableRefs.add(new TableRef(index, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp()));
            }
        }
        if (!bestPlan.getTableRef().getTable().equals(targetTableRef.getTable())) {
            otherTableRefs.add(projectedTableRef);
        }
        return new ClientSelectDeleteMutationPlan(targetTableRef, dataPlan, bestPlan, hasPreOrPostProcessing, parallelIteratorFactory, otherTableRefs, projectedTableRef, maxSize, maxSizeBytes, connection);
    }
}
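
To make the three return branches concrete, the sketch below issues DELETE statements that would typically compile into each plan, reusing the hypothetical events table from the earlier sketch. Which branch is actually taken also depends on autocommit, transactions, and index configuration, so the comments are approximations rather than guarantees.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DeleteCompilerSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            conn.setAutoCommit(true);
            // Full primary key => point lookup, so noQueryReqd stays true and a
            // MultiRowDeleteMutationPlan deletes the row without running a query.
            stmt.executeUpdate(
                "DELETE FROM events WHERE tenant_id = 'a' AND event_id = 'e1'");
            // Autocommit, no pre/post processing, no client-side indexes =>
            // runOnServer path: an ungrouped COUNT(*) aggregate whose coprocessor
            // deletes each row it scans.
            stmt.executeUpdate("DELETE FROM events WHERE tenant_id = 'a'");
            // A LIMIT counts as post-processing, so the delete is driven from the
            // client via the ClientSelectDeleteMutationPlan branch.
            stmt.executeUpdate("DELETE FROM events WHERE tenant_id = 'b' LIMIT 100");
        }
    }
}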

Example 23 with SelectStatement

Use of org.apache.phoenix.parse.SelectStatement in project phoenix by apache.

From the class QueryOptimizer, the addPlan method:

private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan, boolean isHinted) throws SQLException {
    int nColumns = dataPlan.getProjector().getColumnCount();
    String tableAlias = dataPlan.getTableRef().getTableAlias();
    // Double-quote the alias in case it's case-sensitive
    String alias = tableAlias == null ? null : '"' + tableAlias + '"';
    String schemaName = index.getParentSchemaName().getString();
    schemaName = schemaName.length() == 0 ? null : '"' + schemaName + '"';
    String tableName = '"' + index.getTableName().getString() + '"';
    TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName), select.getTableSamplingRate());
    SelectStatement indexSelect = FACTORY.select(select, table);
    ColumnResolver resolver = FromCompiler.getResolverForQuery(indexSelect, statement.getConnection());
    // Whether we do tuple projection follows the data plan.
    boolean isProjected = dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() == PTableType.PROJECTED;
    // Check index state of now potentially updated index table to make sure it's active
    TableRef indexTableRef = resolver.getTables().get(0);
    PTable indexTable = indexTableRef.getTable();
    PIndexState indexState = indexTable.getIndexState();
    Map<TableRef, QueryPlan> dataPlans = Collections.singletonMap(indexTableRef, dataPlan);
    if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
        try {
            // translate nodes that match expressions that are indexed to the associated column parse node
            indexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(index, null, statement.getConnection(), indexSelect.getUdfParseNodes()));
            QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans);
            QueryPlan plan = compiler.compile();
            // A local index with no where clause that still needs columns from the
            // data table would mean scanning both the index and the data table, so
            // don't use it.
            if (index.getIndexType() == IndexType.LOCAL && indexSelect.getWhere() == null && !plan.getContext().getDataColumns().isEmpty()) {
                return null;
            }
            indexTableRef = plan.getTableRef();
            indexTable = indexTableRef.getTable();
            indexState = indexTable.getIndexState();
            // Checking the column count handles the wildcard case correctly: to be
            // usable there, the index must contain all columns from the data table.
            if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
                if (plan.getProjector().getColumnCount() == nColumns) {
                    return plan;
                } else if (index.getIndexType() == IndexType.GLOBAL) {
                    String schemaNameStr = index.getSchemaName() == null ? null : index.getSchemaName().getString();
                    String tableNameStr = index.getTableName() == null ? null : index.getTableName().getString();
                    throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*");
                }
            }
        } catch (ColumnNotFoundException e) {
            /* Means that a column is being used that's not in our index.
             * Since we currently don't keep stats, we don't know the selectivity of the index.
             * For now, if this is a hinted plan, we will try rewriting the query as a subquery;
             * otherwise we just don't use this index (as opposed to trying to join back from
             * the index table to the data table).
             */
            SelectStatement dataSelect = (SelectStatement) dataPlan.getStatement();
            ParseNode where = dataSelect.getWhere();
            if (isHinted && where != null) {
                StatementContext context = new StatementContext(statement, resolver);
                WhereConditionRewriter whereRewriter = new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context);
                where = where.accept(whereRewriter);
                if (where != null) {
                    PTable dataTable = dataPlan.getTableRef().getTable();
                    List<PColumn> pkColumns = dataTable.getPKColumns();
                    List<AliasedNode> aliasedNodes = Lists.<AliasedNode>newArrayListWithExpectedSize(pkColumns.size());
                    List<ParseNode> nodes = Lists.<ParseNode>newArrayListWithExpectedSize(pkColumns.size());
                    boolean isSalted = dataTable.getBucketNum() != null;
                    boolean isTenantSpecific = dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null;
                    int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0);
                    for (int i = posOffset; i < pkColumns.size(); i++) {
                        PColumn column = pkColumns.get(i);
                        String indexColName = IndexUtil.getIndexColumnName(column);
                        ParseNode indexColNode = new ColumnParseNode(null, '"' + indexColName + '"', indexColName);
                        PDataType indexColType = IndexUtil.getIndexColumnDataType(column);
                        PDataType dataColType = column.getDataType();
                        if (indexColType != dataColType) {
                            indexColNode = FACTORY.cast(indexColNode, dataColType, null, null);
                        }
                        aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode));
                        nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"'));
                    }
                    SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), indexSelect.getHint(), false, aliasedNodes, where, null, null, null, null, null, indexSelect.getBindCount(), false, indexSelect.hasSequence(), Collections.<SelectStatement>emptyList(), indexSelect.getUdfParseNodes());
                    ParseNode outerWhere = FACTORY.in(nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes), FACTORY.subquery(innerSelect, false), false, true);
                    ParseNode extractedCondition = whereRewriter.getExtractedCondition();
                    if (extractedCondition != null) {
                        outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition));
                    }
                    HintNode hint = HintNode.combine(HintNode.subtract(indexSelect.getHint(), new Hint[] { Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION }), FACTORY.hint("NO_INDEX"));
                    SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere);
                    ColumnResolver queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                    query = SubqueryRewriter.transform(query, queryResolver, statement.getConnection());
                    queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                    query = StatementNormalizer.normalize(query, queryResolver);
                    QueryPlan plan = new QueryCompiler(statement, query, queryResolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans).compile();
                    return plan;
                }
            }
        }
    }
    return null;
}
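
The ColumnNotFoundException branch is easiest to trigger with a hinted global index that doesn't cover every referenced column. Below is a minimal sketch under assumed names (logs, idx_kind); the hint forces addPlan to attempt the index, fail on the uncovered column, and rewrite the query as a subquery that joins back to the data table through the primary key.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class HintedIndexSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS logs ("
                    + " id VARCHAR PRIMARY KEY, kind VARCHAR, payload VARCHAR)");
            // The index covers only 'kind'; 'payload' is not an included column.
            stmt.execute("CREATE INDEX IF NOT EXISTS idx_kind ON logs (kind)");
            // Compiling against the hinted index throws ColumnNotFoundException for
            // 'payload'; addPlan then builds an inner select over the index that
            // returns matching row keys and wraps it in an IN clause on the data table.
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT /*+ INDEX(logs idx_kind) */ payload"
                    + " FROM logs WHERE kind = 'click'")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}

Without the hint, addPlan simply returns null for this index and the optimizer falls back to other plans, per the comment in the catch block.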

Example 24 with SelectStatement

Use of org.apache.phoenix.parse.SelectStatement in project phoenix by apache.

From the class QueryOptimizer, the rewriteQueryWithIndexReplacement method:

private static SelectStatement rewriteQueryWithIndexReplacement(final PhoenixConnection connection, final ColumnResolver resolver, final SelectStatement select, final Map<TableRef, TableRef> replacement) throws SQLException {
    TableNode from = select.getFrom();
    TableNode newFrom = from.accept(new TableNodeVisitor<TableNode>() {

        private TableRef resolveTable(String alias, TableName name) throws SQLException {
            if (alias != null)
                return resolver.resolveTable(null, alias);
            return resolver.resolveTable(name.getSchemaName(), name.getTableName());
        }

        private TableName getReplacedTableName(TableRef tableRef) {
            String schemaName = tableRef.getTable().getSchemaName().getString();
            return TableName.create(schemaName.length() == 0 ? null : schemaName, tableRef.getTable().getTableName().getString());
        }

        @Override
        public TableNode visit(BindTableNode boundTableNode) throws SQLException {
            TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName());
            TableRef replaceRef = replacement.get(tableRef);
            if (replaceRef == null)
                return boundTableNode;
            String alias = boundTableNode.getAlias();
            return FACTORY.bindTable(alias == null ? null : '"' + alias + '"', getReplacedTableName(replaceRef));
        }

        @Override
        public TableNode visit(JoinTableNode joinNode) throws SQLException {
            TableNode lhs = joinNode.getLHS();
            TableNode rhs = joinNode.getRHS();
            TableNode lhsReplace = lhs.accept(this);
            TableNode rhsReplace = rhs.accept(this);
            if (lhs == lhsReplace && rhs == rhsReplace)
                return joinNode;
            return FACTORY.join(joinNode.getType(), lhsReplace, rhsReplace, joinNode.getOnNode(), joinNode.isSingleValueOnly());
        }

        @Override
        public TableNode visit(NamedTableNode namedTableNode) throws SQLException {
            TableRef tableRef = resolveTable(namedTableNode.getAlias(), namedTableNode.getName());
            TableRef replaceRef = replacement.get(tableRef);
            if (replaceRef == null)
                return namedTableNode;
            String alias = namedTableNode.getAlias();
            return FACTORY.namedTable(alias == null ? null : '"' + alias + '"', getReplacedTableName(replaceRef), namedTableNode.getDynamicColumns(), namedTableNode.getTableSamplingRate());
        }

        @Override
        public TableNode visit(DerivedTableNode subselectNode) throws SQLException {
            return subselectNode;
        }
    });
    if (from == newFrom) {
        return select;
    }
    SelectStatement indexSelect = IndexStatementRewriter.translate(FACTORY.select(select, newFrom), resolver, replacement);
    for (TableRef indexTableRef : replacement.values()) {
        // replace expressions with corresponding matching columns for functional indexes
        indexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(indexTableRef.getTable(), indexTableRef.getTableAlias(), connection, indexSelect.getUdfParseNodes()));
    }
    return indexSelect;
}
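
The anonymous TableNodeVisitor above is the standard way to walk a FROM clause. As a self-contained sketch of the same pattern, using only parser classes that already appear in this example, the snippet below parses a join query and collects its named tables:

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import org.apache.phoenix.parse.BindTableNode;
import org.apache.phoenix.parse.DerivedTableNode;
import org.apache.phoenix.parse.JoinTableNode;
import org.apache.phoenix.parse.NamedTableNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;
import org.apache.phoenix.parse.TableNodeVisitor;

public class FromClauseWalker {
    public static void main(String[] args) throws SQLException {
        SelectStatement select = new SQLParser(
                "SELECT a.id FROM t1 a JOIN t2 b ON a.id = b.id").parseQuery();
        final List<String> tables = new ArrayList<>();
        select.getFrom().accept(new TableNodeVisitor<Void>() {

            @Override
            public Void visit(BindTableNode node) {
                tables.add(node.getName().getTableName());
                return null;
            }

            @Override
            public Void visit(JoinTableNode node) throws SQLException {
                // Recurse into both sides of the join, as the rewriter above does.
                node.getLHS().accept(this);
                node.getRHS().accept(this);
                return null;
            }

            @Override
            public Void visit(NamedTableNode node) {
                tables.add(node.getName().getTableName());
                return null;
            }

            @Override
            public Void visit(DerivedTableNode node) {
                // Subselects are left untouched, mirroring the rewriter above.
                return null;
            }
        });
        // Unquoted identifiers are normalized, so this prints something like [T1, T2].
        System.out.println(tables);
    }
}

Returning the original node when nothing changes (as the rewriter's visit(DerivedTableNode) does) lets the caller detect a no-op rewrite via reference equality, which is exactly how the from == newFrom check above short-circuits.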

Aggregations

SelectStatement (org.apache.phoenix.parse.SelectStatement): 24
ParseNode (org.apache.phoenix.parse.ParseNode): 14
TableRef (org.apache.phoenix.schema.TableRef): 13
ColumnParseNode (org.apache.phoenix.parse.ColumnParseNode): 9
TableNode (org.apache.phoenix.parse.TableNode): 9
PTable (org.apache.phoenix.schema.PTable): 9
Scan (org.apache.hadoop.hbase.client.Scan): 8
AndParseNode (org.apache.phoenix.parse.AndParseNode): 7
AliasedNode (org.apache.phoenix.parse.AliasedNode): 6
SubqueryParseNode (org.apache.phoenix.parse.SubqueryParseNode): 6
ComparisonParseNode (org.apache.phoenix.parse.ComparisonParseNode): 5
Hint (org.apache.phoenix.parse.HintNode.Hint): 5
JoinType (org.apache.phoenix.parse.JoinTableNode.JoinType): 5
LiteralParseNode (org.apache.phoenix.parse.LiteralParseNode): 5
SQLParser (org.apache.phoenix.parse.SQLParser): 5
List (java.util.List): 4
AggregatePlan (org.apache.phoenix.execute.AggregatePlan): 4
CompoundParseNode (org.apache.phoenix.parse.CompoundParseNode): 4
ExistsParseNode (org.apache.phoenix.parse.ExistsParseNode): 4
HintNode (org.apache.phoenix.parse.HintNode): 4