Example 1 with HintNode

Use of org.apache.phoenix.parse.HintNode in project phoenix by apache.

From the class SubselectRewriter, method flatten:

private SelectStatement flatten(SelectStatement select, SelectStatement subselect) throws SQLException {
    // Replace aliases in sub-select first.
    subselect = ParseNodeRewriter.rewrite(subselect, this);
    ParseNode whereRewrite = subselect.getWhere();
    List<ParseNode> groupByRewrite = subselect.getGroupBy();
    ParseNode havingRewrite = subselect.getHaving();
    List<OrderByNode> orderByRewrite = subselect.getOrderBy();
    LimitNode limitRewrite = subselect.getLimit();
    OffsetNode offsetRewrite = subselect.getOffset();
    HintNode hintRewrite = subselect.getHint();
    boolean isDistinctRewrite = subselect.isDistinct();
    boolean isAggregateRewrite = subselect.isAggregate();
    ParseNode where = select.getWhere();
    if (where != null) {
        if (subselect.getLimit() != null || (subselect.isAggregate() && subselect.getGroupBy().isEmpty())) {
            return select;
        }
        ParseNode postFilter = where.accept(this);
        if (subselect.getGroupBy().isEmpty()) {
            whereRewrite = whereRewrite == null ? postFilter : NODE_FACTORY.and(Arrays.<ParseNode>asList(whereRewrite, postFilter));
        } else {
            havingRewrite = havingRewrite == null ? postFilter : NODE_FACTORY.and(Arrays.<ParseNode>asList(havingRewrite, postFilter));
        }
    }
    if (select.isDistinct()) {
        if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) {
            return select;
        }
        isDistinctRewrite = true;
        orderByRewrite = null;
    }
    if (select.isAggregate()) {
        if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) {
            return select;
        }
        isAggregateRewrite = true;
        orderByRewrite = null;
    }
    List<ParseNode> groupBy = select.getGroupBy();
    if (!groupBy.isEmpty()) {
        if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) {
            return select;
        }
        groupByRewrite = Lists.<ParseNode>newArrayListWithExpectedSize(groupBy.size());
        for (ParseNode node : groupBy) {
            groupByRewrite.add(node.accept(this));
        }
        if (select.getHaving() != null) {
            havingRewrite = select.getHaving().accept(this);
        }
        orderByRewrite = null;
    }
    List<AliasedNode> selectNodes = select.getSelect();
    List<AliasedNode> selectNodesRewrite = Lists.newArrayListWithExpectedSize(selectNodes.size());
    for (AliasedNode aliasedNode : selectNodes) {
        ParseNode node = aliasedNode.getNode();
        if (node instanceof WildcardParseNode || (node instanceof TableWildcardParseNode && ((TableWildcardParseNode) node).getTableName().equals(tableAlias))) {
            for (AliasedNode aNode : subselect.getSelect()) {
                String alias = aNode.getAlias();
                String aliasRewrite = alias == null ? null : SchemaUtil.getColumnName(tableAlias, alias);
                selectNodesRewrite.add(NODE_FACTORY.aliasedNode(aliasRewrite, aNode.getNode()));
            }
        } else {
            selectNodesRewrite.add(NODE_FACTORY.aliasedNode(aliasedNode.getAlias(), node.accept(this)));
        }
    }
    List<OrderByNode> orderBy = select.getOrderBy();
    if (!orderBy.isEmpty()) {
        if (subselect.getLimit() != null) {
            return select;
        }
        orderByRewrite = Lists.newArrayListWithExpectedSize(orderBy.size());
        for (OrderByNode orderByNode : orderBy) {
            ParseNode node = orderByNode.getNode();
            orderByRewrite.add(NODE_FACTORY.orderBy(node.accept(this), orderByNode.isNullsLast(), orderByNode.isAscending()));
        }
    }
    OffsetNode offset = select.getOffset();
    if (offsetRewrite != null || (limitRewrite != null && offset != null)) {
        return select;
    } else {
        offsetRewrite = offset;
    }
    LimitNode limit = select.getLimit();
    if (limit != null) {
        if (limitRewrite == null) {
            limitRewrite = limit;
        } else {
            Integer limitValue = LimitCompiler.compile(null, select);
            Integer limitValueSubselect = LimitCompiler.compile(null, subselect);
            if (limitValue != null && limitValueSubselect != null) {
                limitRewrite = limitValue < limitValueSubselect ? limit : limitRewrite;
            } else {
                return select;
            }
        }
    }
    HintNode hint = select.getHint();
    if (hint != null) {
        hintRewrite = hintRewrite == null ? hint : HintNode.combine(hint, hintRewrite);
    }
    SelectStatement stmt = NODE_FACTORY.select(subselect.getFrom(), hintRewrite, isDistinctRewrite, selectNodesRewrite, whereRewrite, groupByRewrite, havingRewrite, orderByRewrite, limitRewrite, offsetRewrite, select.getBindCount(), isAggregateRewrite, select.hasSequence(), select.getSelects(), select.getUdfParseNodes());
    if (tableAlias != null) {
        this.removeAlias = true;
        stmt = ParseNodeRewriter.rewrite(stmt, this);
    }
    return stmt;
}
Also used : OffsetNode(org.apache.phoenix.parse.OffsetNode) TableWildcardParseNode(org.apache.phoenix.parse.TableWildcardParseNode) OrderByNode(org.apache.phoenix.parse.OrderByNode) WildcardParseNode(org.apache.phoenix.parse.WildcardParseNode) AliasedNode(org.apache.phoenix.parse.AliasedNode) SelectStatement(org.apache.phoenix.parse.SelectStatement) LimitNode(org.apache.phoenix.parse.LimitNode) HintNode(org.apache.phoenix.parse.HintNode) ColumnParseNode(org.apache.phoenix.parse.ColumnParseNode) ParseNode(org.apache.phoenix.parse.ParseNode)
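
The interesting hint handling here is at the end of flatten: the outer statement's hint is merged with the sub-select's hint via HintNode.combine, so flattening loses neither. A minimal sketch of that merge in isolation (the class name and hint strings are illustrative; the String constructor is the same one Example 2 uses):

import org.apache.phoenix.parse.HintNode;
import org.apache.phoenix.parse.HintNode.Hint;

public class CombineHintsSketch {
    public static void main(String[] args) {
        // Hint on the outer SELECT and hint on the flattened sub-select.
        HintNode outer = new HintNode("NO_INDEX");
        HintNode inner = new HintNode("NO_CACHE");
        // combine() keeps the hints from both levels of the query.
        HintNode merged = HintNode.combine(outer, inner);
        System.out.println(merged.hasHint(Hint.NO_INDEX)); // expected: true
        System.out.println(merged.hasHint(Hint.NO_CACHE)); // expected: true
    }
}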

Example 2 with HintNode

Use of org.apache.phoenix.parse.HintNode in project phoenix by apache.

From the class QueryUtil, method constructSelectStatement:

/**
 * @param fullTableName name of the table for which the select statement needs to be created.
 * @param columns list of columns to be projected in the select statement.
 * @param conditions the condition clause to be added to the WHERE clause
 * @param hint the hint to use, if any
 * @param escapeCols whether to escape the projected columns
 * @return the SELECT query as a string
 */
public static String constructSelectStatement(String fullTableName, List<String> columns, final String conditions, Hint hint, boolean escapeCols) {
    Preconditions.checkNotNull(fullTableName, "Table name cannot be null");
    if (columns == null || columns.isEmpty()) {
        throw new IllegalArgumentException("At least one column must be provided");
    }
    StringBuilder query = new StringBuilder();
    query.append("SELECT ");
    String hintStr = "";
    if (hint != null) {
        final HintNode node = new HintNode(hint.name());
        hintStr = node.toString();
    }
    query.append(hintStr);
    for (String col : columns) {
        if (col != null) {
            String fullColumnName = col;
            if (escapeCols) {
                fullColumnName = getEscapedFullColumnName(col);
            }
            query.append(fullColumnName);
            query.append(",");
        }
    }
    // Remove the trailing comma
    query.setLength(query.length() - 1);
    query.append(" FROM ");
    query.append(fullTableName);
    if (conditions != null && conditions.length() > 0) {
        query.append(" WHERE (").append(conditions).append(")");
    }
    return query.toString();
}
Also used : HintNode(org.apache.phoenix.parse.HintNode)
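
For reference, a hypothetical invocation (table, columns, and condition are made up) and the approximate SQL it yields; the exact quoting comes from getEscapedFullColumnName and HintNode.toString():

import java.util.Arrays;
import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.util.QueryUtil;

public class ConstructSelectSketch {
    public static void main(String[] args) {
        String sql = QueryUtil.constructSelectStatement(
                "MY_SCHEMA.MY_TABLE",        // fullTableName (must not be null)
                Arrays.asList("ID", "NAME"), // columns to project
                "ID > 100",                  // appended inside WHERE (...)
                Hint.NO_INDEX,               // rendered via new HintNode(hint.name())
                true);                       // escape the projected columns
        // Approximately: SELECT /*+ NO_INDEX */ "ID","NAME" FROM MY_SCHEMA.MY_TABLE WHERE (ID > 100)
        System.out.println(sql);
    }
}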

Example 3 with HintNode

Use of org.apache.phoenix.parse.HintNode in project phoenix by apache.

From the class UpsertCompiler, method compile:

public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    List<ColumnName> columnNodes = upsert.getColumns();
    TableRef tableRefToBe = null;
    PTable table = null;
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
    List<PColumn> allColumnsToBe = Collections.emptyList();
    boolean isTenantSpecific = false;
    boolean isSharedViewIndex = false;
    String tenantIdStr = null;
    ColumnResolver resolver = null;
    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<ParseNode> valueNodes = upsert.getValues();
    List<PColumn> targetColumns;
    NamedTableNode tableNode = upsert.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    QueryPlan queryPlanToBe = null;
    int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    boolean serverUpsertSelectEnabled = services.getProps().getBoolean(QueryServices.ENABLE_SERVER_UPSERT_SELECT, QueryServicesOptions.DEFAULT_ENABLE_SERVER_UPSERT_SELECT);
    UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null;
    boolean useServerTimestampToBe = false;
    resolver = FromCompiler.getResolverForMutation(upsert, connection);
    tableRefToBe = resolver.getTables().get(0);
    table = tableRefToBe.getTable();
    // Cannot update:
    // - a read-only VIEW
    // - a transactional table with a connection having an SCN
    if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(schemaName, tableName);
    } else if (connection.isBuildingIndex() && table.getType() != PTableType.INDEX) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_INDEX_UPDATABLE_AT_SCN).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    } else if (table.isTransactional() && connection.getSCN() != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    }
    boolean isSalted = table.getBucketNum() != null;
    isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
    isSharedViewIndex = table.getViewIndexId() != null;
    tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null;
    int posOffset = isSalted ? 1 : 0;
    // Setup array of column indexes parallel to values that are going to be set
    allColumnsToBe = table.getColumns();
    nColumnsToSet = 0;
    if (table.getViewType() == ViewType.UPDATABLE) {
        addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size());
        for (PColumn column : allColumnsToBe) {
            if (column.getViewConstant() != null) {
                addViewColumnsToBe.add(column);
            }
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    // Allow full row upsert if no columns or only dynamic ones are specified and values count match
    if (columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
        nColumnsToSet = allColumnsToBe.size() - posOffset;
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        int minPKPos = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
            targetColumns.set(minPKPos, indexIdColumn);
            minPKPos++;
        }
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
            targetColumns.set(minPKPos, tenantColumn);
            minPKPos++;
        }
        for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) {
            PColumn column = allColumnsToBe.get(i);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i - posOffset] = j + posOffset;
                if (j++ < minPKPos) {
                    // Skip, as it's already been set above
                    continue;
                }
                minPKPos = 0;
            }
            columnIndexesToBe[i - posOffset + minPKPos] = i;
            targetColumns.set(i - posOffset + minPKPos, column);
        }
        if (!addViewColumnsToBe.isEmpty()) {
            // All view columns overlap in this case
            overlapViewColumnsToBe = addViewColumnsToBe;
            addViewColumnsToBe = Collections.emptySet();
        }
    } else {
        // Size for worst case
        int numColsInUpsert = columnNodes.size();
        nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(columnIndexesToBe, -1);
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(pkSlotIndexesToBe, -1);
        BitSet columnsBeingSet = new BitSet(table.getColumns().size());
        int i = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
            columnsBeingSet.set(columnIndexesToBe[i] = indexIdColumn.getPosition());
            pkSlotIndexesToBe[i] = i + posOffset;
            targetColumns.set(i, indexIdColumn);
            i++;
        }
        // Add tenant column directly, as we don't want to resolve it as this will fail
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
            columnsBeingSet.set(columnIndexesToBe[i] = tenantColumn.getPosition());
            pkSlotIndexesToBe[i] = i + posOffset;
            targetColumns.set(i, tenantColumn);
            i++;
        }
        for (ColumnName colName : columnNodes) {
            ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
            PColumn column = ref.getColumn();
            if (IndexUtil.getViewConstantValue(column, ptr)) {
                if (overlapViewColumnsToBe.isEmpty()) {
                    overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
                }
                nColumnsToSet--;
                overlapViewColumnsToBe.add(column);
                addViewColumnsToBe.remove(column);
            }
            columnsBeingSet.set(columnIndexesToBe[i] = ref.getColumnPosition());
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i] = ref.getPKSlotPosition();
            }
            i++;
        }
        for (PColumn column : addViewColumnsToBe) {
            columnsBeingSet.set(columnIndexesToBe[i] = column.getPosition());
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column);
            }
            i++;
        }
        // If a table has a row timestamp column, then we always set it.
        useServerTimestampToBe = table.getRowTimestampColPos() != -1 && !isRowTimestampSet(pkSlotIndexesToBe, table);
        if (useServerTimestampToBe) {
            PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
            // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column.
            columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1);
            pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1);
            columnsBeingSet.set(columnIndexesToBe[i] = rowTimestampCol.getPosition());
            pkSlotIndexesToBe[i] = table.getRowTimestampColPos();
            targetColumns.add(rowTimestampCol);
            if (valueNodes != null && !valueNodes.isEmpty()) {
                valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol));
            }
            nColumnsToSet++;
        }
        for (i = posOffset; i < table.getColumns().size(); i++) {
            PColumn column = table.getColumns().get(i);
            if (!columnsBeingSet.get(i) && !column.isNullable() && column.getExpressionStr() == null) {
                throw new ConstraintViolationException(SchemaUtil.getColumnDisplayName(column) + " may not be null");
            }
        }
    }
    boolean isAutoCommit = connection.getAutoCommit();
    if (valueNodes == null) {
        SelectStatement select = upsert.getSelect();
        assert (select != null);
        select = SubselectRewriter.flatten(select, connection);
        ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection, false, upsert.getTable().getName());
        select = StatementNormalizer.normalize(select, selectResolver);
        select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe, useServerTimestampToBe);
        SelectStatement transformedSelect = SubqueryRewriter.transform(select, selectResolver, connection);
        if (transformedSelect != select) {
            selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection, false, upsert.getTable().getName());
            select = StatementNormalizer.normalize(transformedSelect, selectResolver);
        }
        sameTable = !select.isJoin() && tableRefToBe.equals(selectResolver.getTables().get(0));
        /* We can run the upsert in a coprocessor if:
             * 1) from has only 1 table or server UPSERT SELECT is enabled
             * 2) the select query isn't doing aggregation (which requires a client-side final merge)
             * 3) autoCommit is on
             * 4) the table is not immutable with indexes, as the client is the one that figures out the additional
             *    puts for index tables.
             * 5) no limit clause, as the limit clause requires client-side post processing
             * 6) no sequences, as sequences imply that the order of upsert must match the order of
             *    selection. TODO: change this and only force client side if there's a ORDER BY on the sequence value
             * Otherwise, run the query to pull the data from the server
             * and populate the MutationState (upto a limit).
            */
        if (!(select.isAggregate() || select.isDistinct() || select.getLimit() != null || select.hasSequence())) {
            // We can pipeline the upsert select instead of spooling everything to disk first,
            // if we don't have any post processing that's required.
            parallelIteratorFactoryToBe = new UpsertingParallelIteratorFactory(connection, tableRefToBe, useServerTimestampToBe);
            // If we reach here, the query isn't an aggregate, distinct, limited, or sequence-using query,
            // so we might be able to run it entirely on the server side.
            // For a table with a row timestamp column we can't guarantee that the row key will reside in the
            // region space managed by region servers. So we bail out on executing on server side.
            // Disable running upsert select on server side if a table has global mutable secondary indexes on it
            boolean hasGlobalMutableIndexes = SchemaUtil.hasGlobalIndex(table) && !table.isImmutableRows();
            boolean hasWhereSubquery = select.getWhere() != null && select.getWhere().hasSubquery();
            runOnServer = (sameTable || (serverUpsertSelectEnabled && !hasGlobalMutableIndexes)) && isAutoCommit && !table.isTransactional() && !(table.isImmutableRows() && !table.getIndexes().isEmpty()) && !select.isJoin() && !hasWhereSubquery && table.getRowTimestampColPos() == -1;
        }
        // If we may be able to run on the server, add a hint that favors using the data table
        // if all else is equal.
        // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
        // as this would disallow running on the server. We currently use the row projector we
        // get back to figure this out.
        HintNode hint = upsert.getHint();
        if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
        }
        select = SelectStatement.create(select, hint);
        // Pass scan through if same table in upsert and select so that projection is computed correctly
        // Use optimizer to choose the best plan
        QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, parallelIteratorFactoryToBe, new SequenceManager(statement), false, false, null);
        queryPlanToBe = compiler.compile();
        // If the compiled plan is against a projected or subquery table, there are post-processing
        // steps and parallelIteratorFactory did not take effect.
        if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
            parallelIteratorFactoryToBe = null;
        }
        nValuesToSet = queryPlanToBe.getProjector().getColumnCount();
    // Cannot auto commit if doing aggregation or topN or salted
    // Salted causes problems because the row may end up living on a different region
    } else {
        nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
    }
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
        nColumnsToSet = nValuesToSet;
        columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
        pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
        for (int i = posOffset + nValuesToSet; i < table.getColumns().size(); i++) {
            PColumn column = table.getColumns().get(i);
            if (!column.isNullable() && column.getExpressionStr() == null) {
                throw new ConstraintViolationException(SchemaUtil.getColumnDisplayName(column) + " may not be null");
            }
        }
    }
    if (nValuesToSet != nColumnsToSet) {
        // Note that this check is not fully sufficient: a column could have
        // been removed and then added back, and we wouldn't detect that here.
        throw new UpsertColumnsValuesMismatchException(schemaName, tableName, "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet);
    }
    final QueryPlan originalQueryPlan = queryPlanToBe;
    RowProjector projectorToBe = null;
    // Optimize only after all checks have been performed
    if (valueNodes == null) {
        queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns, parallelIteratorFactoryToBe);
        projectorToBe = queryPlanToBe.getProjector();
    }
    final List<PColumn> allColumns = allColumnsToBe;
    final RowProjector projector = projectorToBe;
    final QueryPlan queryPlan = queryPlanToBe;
    final TableRef tableRef = tableRefToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
    final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final boolean useServerTimestamp = useServerTimestampToBe;
    if (table.getRowTimestampColPos() == -1 && useServerTimestamp) {
        throw new IllegalStateException("For a table without row timestamp column, useServerTimestamp cannot be true");
    }
    // ///////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
        // Before we re-order, check that for updatable view columns
        // the projected expression either matches the column name or
        // is a constant with the same required value.
        throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);
        // ///////////////////////////////////////////////////////////////////
        if (runOnServer) {
            // At most this array will grow bigger by the number of PK columns
            int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
            int[] reverseColumnIndexes = new int[table.getColumns().size()];
            List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
            Arrays.fill(reverseColumnIndexes, -1);
            for (int i = 0; i < nValuesToSet; i++) {
                projectedExpressions.add(projector.getColumnProjector(i).getExpression());
                reverseColumnIndexes[columnIndexes[i]] = i;
            }
            /*
                 * Order projected columns and projected expressions with PK columns
                 * leading order by slot position
                 */
            int offset = table.getBucketNum() == null ? 0 : 1;
            for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
                PColumn column = table.getPKColumns().get(i + offset);
                int pos = reverseColumnIndexes[column.getPosition()];
                if (pos == -1) {
                    // it's not valid to set a fixed width type to null.
                    if (column.getDataType().isFixedWidth()) {
                        continue;
                    }
                    // Add literal null for missing PK columns
                    pos = projectedExpressions.size();
                    Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(), Determinism.ALWAYS);
                    projectedExpressions.add(literalNull);
                    allColumnsIndexes[pos] = column.getPosition();
                }
                // Swap select expression at pos with i
                Collections.swap(projectedExpressions, i, pos);
                // Swap column indexes and reverse column indexes too
                int tempPos = allColumnsIndexes[i];
                allColumnsIndexes[i] = allColumnsIndexes[pos];
                allColumnsIndexes[pos] = tempPos;
                reverseColumnIndexes[tempPos] = pos;
                reverseColumnIndexes[i] = i;
            }
            // If any pk slots are changing and server side UPSERT SELECT is disabled, do not run on server
            if (!serverUpsertSelectEnabled && ExpressionUtil.isPkPositionChanging(new TableRef(table), projectedExpressions)) {
                runOnServer = false;
            }
            // ///////////////////////////////////////////////////////////////////
            if (runOnServer) {
                // Iterate through columns being projected
                List<PColumn> projectedColumns = Lists.newArrayListWithExpectedSize(projectedExpressions.size());
                int posOff = table.getBucketNum() != null ? 1 : 0;
                for (int i = 0; i < projectedExpressions.size(); i++) {
                    // Must make new column if position has changed
                    PColumn column = allColumns.get(allColumnsIndexes[i]);
                    projectedColumns.add(column.getPosition() == i + posOff ? column : new PColumnImpl(column, i + posOff));
                }
                // Build table from projectedColumns
                // Hack to add default column family to be used on server in case no value column is projected.
                PTable projectedTable = PTableImpl.makePTable(table, projectedColumns, PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(table)));
                SelectStatement select = SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
                StatementContext statementContext = queryPlan.getContext();
                RowProjector aggProjectorToBe = ProjectionCompiler.compile(statementContext, select, GroupBy.EMPTY_GROUP_BY);
                statementContext.getAggregationManager().compile(queryPlan.getContext(), GroupBy.EMPTY_GROUP_BY);
                if (queryPlan.getProjector().projectEveryRow()) {
                    aggProjectorToBe = new RowProjector(aggProjectorToBe, true);
                }
                final RowProjector aggProjector = aggProjectorToBe;
                /*
                     * Transfer over PTable representing subset of columns selected, but all PK columns.
                     * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
                     * Transfer over List<Expression> for projection.
                     * In region scan, evaluate expressions in order, collecting first n columns for PK and collection non PK in mutation Map
                     * Create the PRow and get the mutations, adding them to the batch
                     */
                final StatementContext context = queryPlan.getContext();
                final Scan scan = context.getScan();
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE, UngroupedAggregateRegionObserver.serialize(projectedTable));
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS, UngroupedAggregateRegionObserver.serialize(projectedExpressions));
                // Ignore order by - it has no impact
                final QueryPlan aggPlan = new AggregatePlan(context, select, statementContext.getCurrentTable(), aggProjector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, originalQueryPlan);
                return new ServerUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, context, connection, scan, aggPlan, aggProjector, maxSize, maxSizeBytes);
            }
        }
        // ///////////////////////////////////////////////////////////////////
        return new ClientUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, parallelIteratorFactory, projector, columnIndexes, pkSlotIndexes, useServerTimestamp, maxSize, maxSizeBytes);
    }
    // //////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    // ///////////////////////////////////////////////////////////////////
    final byte[][] values = new byte[nValuesToSet][];
    int nodeIndex = 0;
    if (isSharedViewIndex) {
        values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isTenantSpecific) {
        PName tenantId = connection.getTenantId();
        values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, isSharedViewIndex);
    }
    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size());
    // Build all the constant expressions first; with sequences we want to collect them all
    // and initialize them in one batch
    for (ParseNode valueNode : valueNodes) {
        if (!valueNode.isStateless()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build().buildException();
        }
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        expressionBuilder.setColumn(column);
        Expression expression = valueNode.accept(expressionBuilder);
        if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) {
            throw TypeMismatchException.newException(expression.getDataType(), column.getDataType(), "expression: " + expression.toString() + " in column " + column);
        }
        constantExpressions.add(expression);
        nodeIndex++;
    }
    byte[] onDupKeyBytesToBe = null;
    List<Pair<ColumnName, ParseNode>> onDupKeyPairs = upsert.getOnDupKeyPairs();
    if (onDupKeyPairs != null) {
        if (table.isImmutableRows()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (table.isTransactional()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (connection.getSCN() != null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_SCN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (SchemaUtil.hasGlobalIndex(table)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_WITH_GLOBAL_IDX).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (onDupKeyPairs.isEmpty()) {
            // ON DUPLICATE KEY IGNORE
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyIgnore();
        } else {
            // ON DUPLICATE KEY UPDATE;
            int position = table.getBucketNum() == null ? 0 : 1;
            UpdateColumnCompiler compiler = new UpdateColumnCompiler(context);
            int nColumns = onDupKeyPairs.size();
            List<Expression> updateExpressions = Lists.newArrayListWithExpectedSize(nColumns);
            LinkedHashSet<PColumn> updateColumns = Sets.newLinkedHashSetWithExpectedSize(nColumns + 1);
            // Use the first PK column name, as we know it won't conflict with others
            updateColumns.add(new PColumnImpl(table.getPKColumns().get(position).getName(), null,
                    PVarbinary.INSTANCE, null, null, false, position, SortOrder.getDefault(), 0, null,
                    false, null, false, false, null));
            position++;
            for (Pair<ColumnName, ParseNode> columnPair : onDupKeyPairs) {
                ColumnName colName = columnPair.getFirst();
                PColumn updateColumn = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
                if (SchemaUtil.isPKColumn(updateColumn)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                final int columnPosition = position++;
                if (!updateColumns.add(new DelegateColumn(updateColumn) {

                    @Override
                    public int getPosition() {
                        return columnPosition;
                    }
                })) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                ParseNode updateNode = columnPair.getSecond();
                compiler.setColumn(updateColumn);
                Expression updateExpression = updateNode.accept(compiler);
                // Check that updateExpression is coercible to updateColumn
                if (updateExpression.getDataType() != null && !updateExpression.getDataType().isCastableTo(updateColumn.getDataType())) {
                    throw TypeMismatchException.newException(updateExpression.getDataType(), updateColumn.getDataType(), "expression: " + updateExpression.toString() + " for column " + updateColumn);
                }
                if (compiler.isAggregate()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                updateExpressions.add(updateExpression);
            }
            PTable onDupKeyTable = PTableImpl.makePTable(table, updateColumns);
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyUpdate(onDupKeyTable, updateExpressions);
        }
    }
    final byte[] onDupKeyBytes = onDupKeyBytesToBe;
    return new UpsertValuesMutationPlan(context, tableRef, nodeIndexOffset, constantExpressions, allColumns, columnIndexes, overlapViewColumns, values, addViewColumns, connection, pkSlotIndexes, useServerTimestamp, onDupKeyBytes, maxSize, maxSizeBytes);
}
Also used : PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) PTable(org.apache.phoenix.schema.PTable) DelegateColumn(org.apache.phoenix.schema.DelegateColumn) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) BindParseNode(org.apache.phoenix.parse.BindParseNode) SequenceValueParseNode(org.apache.phoenix.parse.SequenceValueParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) BitSet(java.util.BitSet) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) UpsertColumnsValuesMismatchException(org.apache.phoenix.schema.UpsertColumnsValuesMismatchException) Pair(org.apache.hadoop.hbase.util.Pair) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) PSmallint(org.apache.phoenix.schema.types.PSmallint) ColumnName(org.apache.phoenix.parse.ColumnName) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) PName(org.apache.phoenix.schema.PName) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef)
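
Buried in this long method is a small, reusable hint pattern: before compiling the inner SELECT of an UPSERT SELECT, the compiler adds USE_DATA_OVER_INDEX_TABLE unless the user explicitly hinted the opposite. A condensed sketch of just that decision (the class and method names are ours; HintNode.create(HintNode, Hint) is the call used in the source above):

import org.apache.phoenix.parse.HintNode;
import org.apache.phoenix.parse.HintNode.Hint;

final class UpsertHintSketch {
    // Favor the data table for a possibly server-side UPSERT SELECT,
    // unless the user already forced the index with a hint.
    static HintNode favorDataTable(HintNode userHint) {
        if (userHint.hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            return userHint; // respect the explicit user choice
        }
        return HintNode.create(userHint, Hint.USE_DATA_OVER_INDEX_TABLE);
    }
}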

Example 4 with HintNode

Use of org.apache.phoenix.parse.HintNode in project phoenix by apache.

From the class DeleteCompiler, method compile:

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasPostProcessing = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    DeletingParallelIteratorFactory parallelIteratorFactoryToBe;
    resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
    final TableRef targetTableRef = resolverToBe.getTables().get(0);
    PTable table = targetTableRef.getTable();
    // TODO: SchemaUtil.isReadOnly(PTable, connection)?
    if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(schemaName, tableName);
    } else if (table.isTransactional() && connection.getSCN() != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    }
    List<PTable> clientSideIndexes = getClientSideMaintainedIndexes(targetTableRef);
    final boolean hasClientSideIndexes = !clientSideIndexes.isEmpty();
    boolean isSalted = table.getBucketNum() != null;
    boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int pkColumnOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
    final int pkColumnCount = table.getPKColumns().size() - pkColumnOffset;
    int selectColumnCount = pkColumnCount;
    for (PTable index : clientSideIndexes) {
        selectColumnCount += index.getPKColumns().size() - pkColumnCount;
    }
    Set<PColumn> projectedColumns = new LinkedHashSet<PColumn>(selectColumnCount + pkColumnOffset);
    List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount);
    for (int i = isSalted ? 1 : 0; i < pkColumnOffset; i++) {
        PColumn column = table.getPKColumns().get(i);
        projectedColumns.add(column);
    }
    for (int i = pkColumnOffset; i < table.getPKColumns().size(); i++) {
        PColumn column = table.getPKColumns().get(i);
        projectedColumns.add(column);
        aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
    }
    // Project all non PK indexed columns so that we can do the proper index maintenance
    for (PTable index : table.getIndexes()) {
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        // Go through maintainer as it handles functional indexes correctly
        for (Pair<String, String> columnInfo : maintainer.getIndexedColumnInfo()) {
            String familyName = columnInfo.getFirst();
            if (familyName != null) {
                String columnName = columnInfo.getSecond();
                boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
                PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
                if (!projectedColumns.contains(column)) {
                    projectedColumns.add(column);
                    aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
                }
            }
        }
    }
    select = FACTORY.select(delete.getTable(), delete.getHint(), false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
    select = StatementNormalizer.normalize(select, resolverToBe);
    SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
    boolean hasPreProcessing = transformedSelect != select;
    if (transformedSelect != select) {
        resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
        select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
    }
    final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
    boolean noQueryReqd = !hasPreOrPostProcessing;
    // No limit and no sub queries, joins, etc in where clause
    // Can't run on same server for transactional data, as we need the row keys for the data
    // that is being upserted for conflict detection purposes.
    // If we have immutable indexes, we'd increase the number of bytes scanned by executing
    // separate queries against each index, so better to drive from a single table in that case.
    boolean runOnServer = isAutoCommit && !hasPreOrPostProcessing && !table.isTransactional() && !hasClientSideIndexes;
    HintNode hint = delete.getHint();
    if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
        select = SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE));
    }
    parallelIteratorFactoryToBe = hasPreOrPostProcessing ? null : new DeletingParallelIteratorFactory(connection);
    QueryOptimizer optimizer = new QueryOptimizer(services);
    QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe, new SequenceManager(statement));
    final QueryPlan dataPlan = compiler.compile();
    // TODO: the select clause should know that there's a sub query, but doesn't seem to currently
    queryPlans = Lists.newArrayList(!clientSideIndexes.isEmpty() ? optimizer.getApplicablePlans(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe) : optimizer.getBestPlan(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe));
    // Filter out any local indexes that don't contain all indexed columns.
    // We have to do this manually because local indexes are still used
    // when referenced columns aren't in the index, so they won't be
    // filtered by the optimizer.
    queryPlans = new ArrayList<>(queryPlans);
    Iterator<QueryPlan> iterator = queryPlans.iterator();
    while (iterator.hasNext()) {
        QueryPlan plan = iterator.next();
        if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
            if (!plan.getContext().getDataColumns().isEmpty()) {
                iterator.remove();
            }
        }
    }
    if (queryPlans.isEmpty()) {
        queryPlans = Collections.singletonList(dataPlan);
    }
    runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
    // We need to have all indexed columns available in all immutable indexes in order
    // to generate the delete markers from the query. We also cannot have any filters
    // except for our SkipScanFilter for point lookups.
    // A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause
    // may have been optimized out. Instead, we check that there's a single SkipScanFilter
    // If we can generate a plan for every index, that means all the required columns are available in every index,
    // hence we can drive the delete from any of the plans.
    noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
    int queryPlanIndex = 0;
    while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
        QueryPlan plan = queryPlans.get(queryPlanIndex++);
        StatementContext context = plan.getContext();
        noQueryReqd &= (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup();
    }
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    // If we're doing a query for a set of rows with no where clause, then we don't need to contact the server at all.
    if (noQueryReqd) {
        // Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
        // from the data table, while the others will be for deleting rows from immutable indexes.
        List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(queryPlans.size());
        for (final QueryPlan plan : queryPlans) {
            mutationPlans.add(new SingleRowDeleteMutationPlan(plan, connection, maxSize, maxSizeBytes));
        }
        return new MultiRowDeleteMutationPlan(dataPlan, mutationPlans);
    } else if (runOnServer) {
        // TODO: better abstraction
        final StatementContext context = dataPlan.getContext();
        Scan scan = context.getScan();
        scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
        // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
        // The coprocessor will delete each row returned from the scan
        // Ignoring ORDER BY, since with auto commit on and no limit makes no difference
        SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
        RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
        if (dataPlan.getProjector().projectEveryRow()) {
            projectorToBe = new RowProjector(projectorToBe, true);
        }
        final RowProjector projector = projectorToBe;
        final QueryPlan aggPlan = new AggregatePlan(context, select, dataPlan.getTableRef(), projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, dataPlan);
        return new ServerSelectDeleteMutationPlan(dataPlan, connection, aggPlan, projector, maxSize, maxSizeBytes);
    } else {
        final DeletingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
        List<PColumn> adjustedProjectedColumns = Lists.newArrayListWithExpectedSize(projectedColumns.size());
        final int offset = table.getBucketNum() == null ? 0 : 1;
        Iterator<PColumn> projectedColsItr = projectedColumns.iterator();
        int i = 0;
        while (projectedColsItr.hasNext()) {
            final int position = i++;
            adjustedProjectedColumns.add(new DelegateColumn(projectedColsItr.next()) {

                @Override
                public int getPosition() {
                    return position + offset;
                }
            });
        }
        PTable projectedTable = PTableImpl.makePTable(table, PTableType.PROJECTED, adjustedProjectedColumns);
        final TableRef projectedTableRef = new TableRef(projectedTable, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp());
        QueryPlan bestPlanToBe = dataPlan;
        for (QueryPlan plan : queryPlans) {
            PTable planTable = plan.getTableRef().getTable();
            if (planTable.getIndexState() != PIndexState.BUILDING) {
                bestPlanToBe = plan;
                break;
            }
        }
        final QueryPlan bestPlan = bestPlanToBe;
        final List<TableRef> otherTableRefs = Lists.newArrayListWithExpectedSize(clientSideIndexes.size());
        for (PTable index : clientSideIndexes) {
            if (!bestPlan.getTableRef().getTable().equals(index)) {
                otherTableRefs.add(new TableRef(index, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp()));
            }
        }
        if (!bestPlan.getTableRef().getTable().equals(targetTableRef.getTable())) {
            otherTableRefs.add(projectedTableRef);
        }
        return new ClientSelectDeleteMutationPlan(targetTableRef, dataPlan, bestPlan, hasPreOrPostProcessing, parallelIteratorFactory, otherTableRefs, projectedTableRef, maxSize, maxSizeBytes, connection);
    }
}
Also used : LinkedHashSet(java.util.LinkedHashSet) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) ResultIterator(org.apache.phoenix.iterate.ResultIterator) Iterator(java.util.Iterator) DelegateColumn(org.apache.phoenix.schema.DelegateColumn) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) ArrayList(java.util.ArrayList) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) AliasedNode(org.apache.phoenix.parse.AliasedNode) Hint(org.apache.phoenix.parse.HintNode.Hint) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) Scan(org.apache.hadoop.hbase.client.Scan) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef)
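
The delete path applies the same data-table preference as the upsert path, but guards it behind runOnServer: only a delete that can be pushed to the server gets the rewritten hint. A condensed sketch of that guard (the class and method names are ours; the boolean parameter stands in for the checks computed in the method above):

import org.apache.phoenix.parse.HintNode;
import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.parse.SelectStatement;

final class DeleteHintSketch {
    static SelectStatement maybeFavorDataTable(SelectStatement select,
                                               HintNode hint,
                                               boolean runOnServer) {
        // Rewrite only when the delete may run server-side and the user
        // did not explicitly ask to drive the delete from an index.
        if (runOnServer && !hint.hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            return SelectStatement.create(select,
                    HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE));
        }
        return select;
    }
}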

Example 5 with HintNode

Use of org.apache.phoenix.parse.HintNode in project phoenix by apache.

From the class QueryOptimizer, method addPlan:

private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan, boolean isHinted) throws SQLException {
    int nColumns = dataPlan.getProjector().getColumnCount();
    String tableAlias = dataPlan.getTableRef().getTableAlias();
    // double quote in case it's case sensitive
    String alias = tableAlias == null ? null : '"' + tableAlias + '"';
    String schemaName = index.getParentSchemaName().getString();
    schemaName = schemaName.length() == 0 ? null : '"' + schemaName + '"';
    String tableName = '"' + index.getTableName().getString() + '"';
    TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName), select.getTableSamplingRate());
    SelectStatement indexSelect = FACTORY.select(select, table);
    ColumnResolver resolver = FromCompiler.getResolverForQuery(indexSelect, statement.getConnection());
    // We will or will not do tuple projection according to the data plan.
    boolean isProjected = dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() == PTableType.PROJECTED;
    // Check index state of now potentially updated index table to make sure it's active
    TableRef indexTableRef = resolver.getTables().get(0);
    PTable indexTable = indexTableRef.getTable();
    PIndexState indexState = indexTable.getIndexState();
    Map<TableRef, QueryPlan> dataPlans = Collections.singletonMap(indexTableRef, dataPlan);
    if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
        try {
            // translate nodes that match expressions that are indexed to the associated column parse node
            indexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(index, null, statement.getConnection(), indexSelect.getUdfParseNodes()));
            QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans);
            QueryPlan plan = compiler.compile();
            // If all of the columns being projected are present in the index, we can use the
            // index even when the query doesn't have a where clause. Otherwise a local index
            // scan would have to fetch the missing columns from the data table for every row,
            // so bail out and let the data table plan be used instead.
            if (index.getIndexType() == IndexType.LOCAL && indexSelect.getWhere() == null && !plan.getContext().getDataColumns().isEmpty()) {
                return null;
            }
            indexTableRef = plan.getTableRef();
            indexTable = indexTableRef.getTable();
            indexState = indexTable.getIndexState();
            // Checking the column count handles the wildcard cases correctly, as in that case the index
            // must contain all columns from the data table to be able to be used.
            if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
                if (plan.getProjector().getColumnCount() == nColumns) {
                    return plan;
                } else if (index.getIndexType() == IndexType.GLOBAL) {
                    String schemaNameStr = index.getSchemaName() == null ? null : index.getSchemaName().getString();
                    String tableNameStr = index.getTableName() == null ? null : index.getTableName().getString();
                    throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*");
                }
            }
        } catch (ColumnNotFoundException e) {
            /* Means that a column is being used that's not in our index.
                 * Since we currently don't keep stats, we don't know the selectivity of the index.
                 * For now, if this is a hinted plan, we will try rewriting the query as a subquery;
                 * otherwise we just don't use this index (as opposed to trying to join back from
                 * the index table to the data table).
                 */
            SelectStatement dataSelect = (SelectStatement) dataPlan.getStatement();
            ParseNode where = dataSelect.getWhere();
            if (isHinted && where != null) {
                StatementContext context = new StatementContext(statement, resolver);
                WhereConditionRewriter whereRewriter = new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context);
                where = where.accept(whereRewriter);
                if (where != null) {
                    PTable dataTable = dataPlan.getTableRef().getTable();
                    List<PColumn> pkColumns = dataTable.getPKColumns();
                    List<AliasedNode> aliasedNodes = Lists.<AliasedNode>newArrayListWithExpectedSize(pkColumns.size());
                    List<ParseNode> nodes = Lists.<ParseNode>newArrayListWithExpectedSize(pkColumns.size());
                    boolean isSalted = dataTable.getBucketNum() != null;
                    boolean isTenantSpecific = dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null;
                    int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0);
                    for (int i = posOffset; i < pkColumns.size(); i++) {
                        PColumn column = pkColumns.get(i);
                        String indexColName = IndexUtil.getIndexColumnName(column);
                        ParseNode indexColNode = new ColumnParseNode(null, '"' + indexColName + '"', indexColName);
                        PDataType indexColType = IndexUtil.getIndexColumnDataType(column);
                        PDataType dataColType = column.getDataType();
                        if (indexColType != dataColType) {
                            indexColNode = FACTORY.cast(indexColNode, dataColType, null, null);
                        }
                        aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode));
                        nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"'));
                    }
                    SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), indexSelect.getHint(), false, aliasedNodes, where, null, null, null, null, null, indexSelect.getBindCount(), false, indexSelect.hasSequence(), Collections.<SelectStatement>emptyList(), indexSelect.getUdfParseNodes());
                    ParseNode outerWhere = FACTORY.in(nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes), FACTORY.subquery(innerSelect, false), false, true);
                    ParseNode extractedCondition = whereRewriter.getExtractedCondition();
                    if (extractedCondition != null) {
                        outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition));
                    }
                    HintNode hint = HintNode.combine(HintNode.subtract(indexSelect.getHint(), new Hint[] { Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION }), FACTORY.hint("NO_INDEX"));
                    SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere);
                    ColumnResolver queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                    query = SubqueryRewriter.transform(query, queryResolver, statement.getConnection());
                    queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                    query = StatementNormalizer.normalize(query, queryResolver);
                    QueryPlan plan = new QueryCompiler(statement, query, queryResolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans).compile();
                    return plan;
                }
            }
        }
    }
    return null;
}
Also used : PIndexState(org.apache.phoenix.schema.PIndexState) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) QueryPlan(org.apache.phoenix.compile.QueryPlan) QueryCompiler(org.apache.phoenix.compile.QueryCompiler) Hint(org.apache.phoenix.parse.HintNode.Hint) PTable(org.apache.phoenix.schema.PTable) StatementContext(org.apache.phoenix.compile.StatementContext) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) PDataType(org.apache.phoenix.schema.types.PDataType) ColumnParseNode(org.apache.phoenix.parse.ColumnParseNode) HintNode(org.apache.phoenix.parse.HintNode) TableNode(org.apache.phoenix.parse.TableNode) JoinTableNode(org.apache.phoenix.parse.JoinTableNode) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) BindTableNode(org.apache.phoenix.parse.BindTableNode) DerivedTableNode(org.apache.phoenix.parse.DerivedTableNode) AndParseNode(org.apache.phoenix.parse.AndParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) IndexExpressionParseNodeRewriter(org.apache.phoenix.parse.IndexExpressionParseNodeRewriter) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) TableRef(org.apache.phoenix.schema.TableRef)
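
When the hinted index plan fails with ColumnNotFoundException, the rewrite above does a bit of hint algebra: subtract the hints that forced the index, then combine in NO_INDEX so the rewritten outer query drives from the data table. The same algebra in isolation (the class name and the input hint string are illustrative; subtract and combine are the calls used in the source above):

import org.apache.phoenix.parse.HintNode;
import org.apache.phoenix.parse.HintNode.Hint;

public class RewriteHintSketch {
    public static void main(String[] args) {
        HintNode original = new HintNode("INDEX(t my_idx)");
        // Drop the hints that steered the optimizer toward the index...
        HintNode stripped = HintNode.subtract(original,
                new Hint[] { Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION });
        // ...then force the rewritten outer query back onto the data table.
        HintNode rewritten = HintNode.combine(stripped, new HintNode("NO_INDEX"));
        System.out.println(rewritten.hasHint(Hint.NO_INDEX)); // expected: true
        System.out.println(rewritten.hasHint(Hint.INDEX));    // expected: false
    }
}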

Aggregations

HintNode (org.apache.phoenix.parse.HintNode) 6
Hint (org.apache.phoenix.parse.HintNode.Hint) 4
ParseNode (org.apache.phoenix.parse.ParseNode) 4
SelectStatement (org.apache.phoenix.parse.SelectStatement) 4
NamedTableNode (org.apache.phoenix.parse.NamedTableNode) 3
PColumn (org.apache.phoenix.schema.PColumn) 3
PTable (org.apache.phoenix.schema.PTable) 3
TableRef (org.apache.phoenix.schema.TableRef) 3
List (java.util.List) 2
Scan (org.apache.hadoop.hbase.client.Scan) 2
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo) 2
AggregatePlan (org.apache.phoenix.execute.AggregatePlan) 2
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 2
QueryOptimizer (org.apache.phoenix.optimize.QueryOptimizer) 2
AliasedNode (org.apache.phoenix.parse.AliasedNode) 2
ColumnParseNode (org.apache.phoenix.parse.ColumnParseNode) 2
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices) 2
DelegateColumn (org.apache.phoenix.schema.DelegateColumn) 2
ReadOnlyTableException (org.apache.phoenix.schema.ReadOnlyTableException) 2
ArrayList (java.util.ArrayList) 1