
Example 1 with AggregatePlan

Use of org.apache.phoenix.execute.AggregatePlan in project phoenix by apache.

The class PostDDLCompiler, method compile:

public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, new ColumnResolver() {

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<PFunction> getFunctions() {
            return Collections.<PFunction>emptyList();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new FunctionNotFoundException(functionName);
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            throw new SchemaNotFoundException(schemaName);
        }

        @Override
        public List<PSchema> getSchemas() {
            throw new UnsupportedOperationException();
        }
    }, scan, new SequenceManager(statement));
    return new BaseMutationPlan(context, Operation.UPSERT) {

        /* FIXME */
        @Override
        public MutationState execute() throws SQLException {
            if (tableRefs.isEmpty()) {
                return new MutationState(0, 1000, connection);
            }
            boolean wasAutoCommit = connection.getAutoCommit();
            try {
                connection.setAutoCommit(true);
                SQLException sqlE = null;
                /*
                     * Handles:
                     * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
                     * 2) deletion of all column values for a ALTER TABLE DROP COLUMN
                     * 3) updating the necessary rows to have an empty KV
                     * 4) updating table stats
                     */
                long totalMutationCount = 0;
                for (final TableRef tableRef : tableRefs) {
                    Scan scan = ScanUtil.newScan(context.getScan());
                    SelectStatement select = SelectStatement.COUNT_ONE;
                    // We need to use this tableRef
                    ColumnResolver resolver = new ColumnResolver() {

                        @Override
                        public List<TableRef> getTables() {
                            return Collections.singletonList(tableRef);
                        }

                        @Override
                        public java.util.List<PFunction> getFunctions() {
                            return Collections.emptyList();
                        }

                        @Override
                        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
                            PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
                            return new ColumnRef(tableRef, column.getPosition());
                        }

                        @Override
                        public PFunction resolveFunction(String functionName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean hasUDFs() {
                            return false;
                        }

                        @Override
                        public List<PSchema> getSchemas() {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public PSchema resolveSchema(String schemaName) throws SQLException {
                            throw new SchemaNotFoundException(schemaName);
                        }
                    };
                    PhoenixStatement statement = new PhoenixStatement(connection);
                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
                    long ts = timestamp;
                    // in this case, so maybe this is ok.
                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                        ts = TransactionUtil.convertToNanoseconds(ts);
                    }
                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
                    if (emptyCF != null) {
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
                    }
                    ServerCache cache = null;
                    try {
                        if (deleteList != null) {
                            if (deleteList.isEmpty()) {
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                            // In the case of a row deletion, add index metadata so mutable secondary indexing works
                            /* TODO: we currently manually run a scan to delete the index data here
                                    ImmutableBytesWritable ptr = context.getTempPtr();
                                    tableRef.getTable().getIndexMaintainers(ptr);
                                    if (ptr.getLength() > 0) {
                                        IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                        cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                        byte[] uuidValue = cache.getId();
                                        scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                    }
                                    */
                            } else {
                                // In the case of the empty key value column family changing, do not send the index
                                // metadata, as we're currently managing this from the client. It's possible for the
                                // data empty column family to stay the same, while the index empty column family
                                // changes.
                                PColumn column = deleteList.get(0);
                                byte[] cq = column.getColumnQualifierBytes();
                                if (emptyCF == null) {
                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
                                }
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
                            }
                        }
                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                        if (projectCFs == null) {
                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                columnFamilies.add(family.getName().getBytes());
                            }
                        } else {
                            for (byte[] projectCF : projectCFs) {
                                columnFamilies.add(projectCF);
                            }
                        }
                        // Need to project all column families into the scan, since we haven't yet created our empty key value
                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
                        // since at this point we haven't added the empty key value everywhere.
                        if (columnFamilies != null) {
                            scan.getFamilyMap().clear();
                            for (byte[] family : columnFamilies) {
                                scan.addFamily(family);
                            }
                            projector = new RowProjector(projector, false);
                        }
                        // any other Post DDL operations.
                        try {
                            // Since dropping a VIEW does not affect the underlying data, we do
                            // not need to pass through the view statement here.
                            // Push where clause into scan
                            WhereCompiler.compile(context, select);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            continue;
                        } catch (AmbiguousColumnException e) {
                            continue;
                        }
                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                        try {
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } catch (TableNotFoundException e) {
                        // Ignore and continue, as HBase throws when table hasn't been written to
                        // FIXME: Remove if this is fixed in 0.96
                        }
                    } finally {
                        if (cache != null) {
                            // Remove server cache if there is one
                            cache.close();
                        }
                    }
                }
                final long count = totalMutationCount;
                return new MutationState(1, 1000, connection) {

                    @Override
                    public long getUpdateCount() {
                        return count;
                    }
                };
            } finally {
                if (!wasAutoCommit)
                    connection.setAutoCommit(wasAutoCommit);
            }
        }
    };
}
Also used : ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PFunction(org.apache.phoenix.parse.PFunction) SQLException(java.sql.SQLException) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) List(java.util.List) AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ResultIterator(org.apache.phoenix.iterate.ResultIterator) PSchema(org.apache.phoenix.parse.PSchema) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) FunctionNotFoundException(org.apache.phoenix.schema.FunctionNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) MutationState(org.apache.phoenix.execute.MutationState) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) SchemaNotFoundException(org.apache.phoenix.schema.SchemaNotFoundException) TableRef(org.apache.phoenix.schema.TableRef) Tuple(org.apache.phoenix.schema.tuple.Tuple)
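For context on Example 1: PostDDLCompiler.compile builds a MutationPlan whose execute() constructs one AggregatePlan per TableRef, essentially a server-side COUNT(1) scan that the region observer uses to delete rows or column values and to report how many rows were touched. Below is a minimal caller sketch, not taken from the example above; it assumes an existing PhoenixConnection and TableRef, and the emptyCF, droppedColumns, and ts values are placeholders for whatever the DDL code path supplies:

// Hypothetical caller, loosely mirroring how Phoenix's MetaDataClient drives PostDDLCompiler
// after a DROP COLUMN; tableRef, emptyCF, droppedColumns and ts are placeholder variables.
PostDDLCompiler compiler = new PostDDLCompiler(connection);
MutationPlan plan = compiler.compile(
        Collections.singletonList(tableRef), // tables whose rows need post-DDL maintenance
        emptyCF,                             // new empty column family, or null if unchanged
        null,                                // projectCFs: null projects all column families
        droppedColumns,                      // columns being dropped, or null
        ts);                                 // upper-bound timestamp for the server-side scan
// updateData() executes the plan; the AggregatePlan's single result row carries the row count.
MutationState state = connection.getQueryServices().updateData(plan);
long rowsAffected = state.getUpdateCount();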

Example 2 with AggregatePlan

Use of org.apache.phoenix.execute.AggregatePlan in project phoenix by apache.

The class UpsertCompiler, method compile:

public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    List<ColumnName> columnNodes = upsert.getColumns();
    TableRef tableRefToBe = null;
    PTable table = null;
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
    List<PColumn> allColumnsToBe = Collections.emptyList();
    boolean isTenantSpecific = false;
    boolean isSharedViewIndex = false;
    String tenantIdStr = null;
    ColumnResolver resolver = null;
    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<ParseNode> valueNodes = upsert.getValues();
    List<PColumn> targetColumns;
    NamedTableNode tableNode = upsert.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    QueryPlan queryPlanToBe = null;
    int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    boolean serverUpsertSelectEnabled = services.getProps().getBoolean(QueryServices.ENABLE_SERVER_UPSERT_SELECT, QueryServicesOptions.DEFAULT_ENABLE_SERVER_UPSERT_SELECT);
    UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null;
    // Retry once if auto commit is off, as the meta data may
    // be out of date. We do not retry if auto commit is on, as we
    // update the cache up front when we create the resolver in that case.
    boolean retryOnce = !connection.getAutoCommit();
    boolean useServerTimestampToBe = false;
    resolver = FromCompiler.getResolverForMutation(upsert, connection);
    tableRefToBe = resolver.getTables().get(0);
    table = tableRefToBe.getTable();
    // - transactional table with a connection having an SCN
    if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(schemaName, tableName);
    } else if (table.isTransactional() && connection.getSCN() != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    }
    boolean isSalted = table.getBucketNum() != null;
    isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
    isSharedViewIndex = table.getViewIndexId() != null;
    tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null;
    int posOffset = isSalted ? 1 : 0;
    // Setup array of column indexes parallel to values that are going to be set
    allColumnsToBe = table.getColumns();
    nColumnsToSet = 0;
    if (table.getViewType() == ViewType.UPDATABLE) {
        addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size());
        for (PColumn column : allColumnsToBe) {
            if (column.getViewConstant() != null) {
                addViewColumnsToBe.add(column);
            }
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    // Allow full row upsert if no columns or only dynamic ones are specified and values count match
    if (columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
        nColumnsToSet = allColumnsToBe.size() - posOffset;
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        int minPKPos = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
            targetColumns.set(minPKPos, indexIdColumn);
            minPKPos++;
        }
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
            targetColumns.set(minPKPos, tenantColumn);
            minPKPos++;
        }
        for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) {
            PColumn column = allColumnsToBe.get(i);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i - posOffset] = j + posOffset;
                if (j++ < minPKPos) {
                    // Skip, as it's already been set above
                    continue;
                }
                minPKPos = 0;
            }
            columnIndexesToBe[i - posOffset + minPKPos] = i;
            targetColumns.set(i - posOffset + minPKPos, column);
        }
        if (!addViewColumnsToBe.isEmpty()) {
            // All view columns overlap in this case
            overlapViewColumnsToBe = addViewColumnsToBe;
            addViewColumnsToBe = Collections.emptySet();
        }
    } else {
        // Size for worse case
        int numColsInUpsert = columnNodes.size();
        nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(columnIndexesToBe, -1);
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(pkSlotIndexesToBe, -1);
        BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
        int i = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
            columnIndexesToBe[i] = indexIdColumn.getPosition();
            pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
            targetColumns.set(i, indexIdColumn);
            i++;
        }
        // Add tenant column directly, as we don't want to resolve it as this will fail
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
            columnIndexesToBe[i] = tenantColumn.getPosition();
            pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
            targetColumns.set(i, tenantColumn);
            i++;
        }
        for (ColumnName colName : columnNodes) {
            ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
            PColumn column = ref.getColumn();
            if (IndexUtil.getViewConstantValue(column, ptr)) {
                if (overlapViewColumnsToBe.isEmpty()) {
                    overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
                }
                nColumnsToSet--;
                overlapViewColumnsToBe.add(column);
                addViewColumnsToBe.remove(column);
            }
            columnIndexesToBe[i] = ref.getColumnPosition();
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
            }
            i++;
        }
        for (PColumn column : addViewColumnsToBe) {
            columnIndexesToBe[i] = column.getPosition();
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
            }
            i++;
        }
        // If a table has rowtimestamp col, then we always set it.
        useServerTimestampToBe = table.getRowTimestampColPos() != -1 && !isRowTimestampSet(pkSlotIndexesToBe, table);
        if (useServerTimestampToBe) {
            PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
            // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column.
            columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1);
            pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1);
            columnIndexesToBe[i] = rowTimestampCol.getPosition();
            pkColumnsSet.set(pkSlotIndexesToBe[i] = table.getRowTimestampColPos());
            targetColumns.add(rowTimestampCol);
            if (valueNodes != null && !valueNodes.isEmpty()) {
                valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol));
            }
            nColumnsToSet++;
        }
        for (i = posOffset; i < table.getPKColumns().size(); i++) {
            PColumn pkCol = table.getPKColumns().get(i);
            if (!pkColumnsSet.get(i)) {
                if (!pkCol.isNullable() && pkCol.getExpressionStr() == null) {
                    throw new ConstraintViolationException(table.getName().getString() + "." + pkCol.getName().getString() + " may not be null");
                }
            }
        }
    }
    boolean isAutoCommit = connection.getAutoCommit();
    if (valueNodes == null) {
        SelectStatement select = upsert.getSelect();
        assert (select != null);
        select = SubselectRewriter.flatten(select, connection);
        ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection, false, upsert.getTable().getName());
        select = StatementNormalizer.normalize(select, selectResolver);
        select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe, useServerTimestampToBe);
        SelectStatement transformedSelect = SubqueryRewriter.transform(select, selectResolver, connection);
        if (transformedSelect != select) {
            selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection, false, upsert.getTable().getName());
            select = StatementNormalizer.normalize(transformedSelect, selectResolver);
        }
        sameTable = !select.isJoin() && tableRefToBe.equals(selectResolver.getTables().get(0));
        tableRefToBe = adjustTimestampToMinOfSameTable(tableRefToBe, selectResolver.getTables());
        /* We can run the upsert in a coprocessor if:
             * 1) from has only 1 table or server UPSERT SELECT is enabled
             * 2) the select query isn't doing aggregation (which requires a client-side final merge)
             * 3) autoCommit is on
             * 4) the table is not immutable with indexes, as the client is the one that figures out the additional
             *    puts for index tables.
             * 5) no limit clause, as the limit clause requires client-side post processing
             * 6) no sequences, as sequences imply that the order of upsert must match the order of
             *    selection. TODO: change this and only force client side if there's a ORDER BY on the sequence value
             * Otherwise, run the query to pull the data from the server
             * and populate the MutationState (upto a limit).
            */
        if (!(select.isAggregate() || select.isDistinct() || select.getLimit() != null || select.hasSequence())) {
            // We can pipeline the upsert select instead of spooling everything to disk first,
            // if we don't have any post processing that's required.
            parallelIteratorFactoryToBe = new UpsertingParallelIteratorFactory(connection, tableRefToBe, useServerTimestampToBe);
            // If we're in the else, then it's not an aggregate, distinct, limited, or sequence using query,
            // so we might be able to run it entirely on the server side.
            // region space managed by region servers. So we bail out on executing on server side.
            runOnServer = (sameTable || serverUpsertSelectEnabled) && isAutoCommit && !table.isTransactional() && !(table.isImmutableRows() && !table.getIndexes().isEmpty()) && !select.isJoin() && table.getRowTimestampColPos() == -1;
        }
        // If we may be able to run on the server, add a hint that favors using the data table
        // if all else is equal.
        // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
        // as this would disallow running on the server. We currently use the row projector we
        // get back to figure this out.
        HintNode hint = upsert.getHint();
        if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
        }
        select = SelectStatement.create(select, hint);
        // Use optimizer to choose the best plan
        try {
            QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, parallelIteratorFactoryToBe, new SequenceManager(statement), false);
            queryPlanToBe = compiler.compile();
            // steps and parallelIteratorFactory did not take effect.
            if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
                parallelIteratorFactoryToBe = null;
            }
        } catch (MetaDataEntityNotFoundException e) {
            // don't retry if select clause has meta data entities that aren't found, as we already updated the cache
            retryOnce = false;
            throw e;
        }
        nValuesToSet = queryPlanToBe.getProjector().getColumnCount();
    // Cannot auto commit if doing aggregation or topN or salted
    // Salted causes problems because the row may end up living on a different region
    } else {
        nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
    }
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
        nColumnsToSet = nValuesToSet;
        columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
        pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
    }
    if (nValuesToSet != nColumnsToSet) {
        // been removed and then added back and we wouldn't detect that here.
        throw new UpsertColumnsValuesMismatchException(schemaName, tableName, "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet);
    }
    final QueryPlan originalQueryPlan = queryPlanToBe;
    RowProjector projectorToBe = null;
    // Optimize only after all checks have been performed
    if (valueNodes == null) {
        queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns, parallelIteratorFactoryToBe);
        projectorToBe = queryPlanToBe.getProjector();
    }
    final List<PColumn> allColumns = allColumnsToBe;
    final RowProjector projector = projectorToBe;
    final QueryPlan queryPlan = queryPlanToBe;
    final TableRef tableRef = tableRefToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
    final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final boolean useServerTimestamp = useServerTimestampToBe;
    if (table.getRowTimestampColPos() == -1 && useServerTimestamp) {
        throw new IllegalStateException("For a table without row timestamp column, useServerTimestamp cannot be true");
    }
    /////////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
        // Before we re-order, check that for updatable view columns
        // the projected expression either matches the column name or
        // is a constant with the same required value.
        throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);
        /////////////////////////////////////////////////////////////////////
        if (runOnServer) {
            // At most this array will grow bigger by the number of PK columns
            int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
            int[] reverseColumnIndexes = new int[table.getColumns().size()];
            List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
            Arrays.fill(reverseColumnIndexes, -1);
            for (int i = 0; i < nValuesToSet; i++) {
                projectedExpressions.add(projector.getColumnProjector(i).getExpression());
                reverseColumnIndexes[columnIndexes[i]] = i;
            }
            /*
                 * Order projected columns and projected expressions with PK columns
                 * leading order by slot position
                 */
            int offset = table.getBucketNum() == null ? 0 : 1;
            for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
                PColumn column = table.getPKColumns().get(i + offset);
                int pos = reverseColumnIndexes[column.getPosition()];
                if (pos == -1) {
                    // it's not valid to set a fixed width type to null.
                    if (column.getDataType().isFixedWidth()) {
                        continue;
                    }
                    // Add literal null for missing PK columns
                    pos = projectedExpressions.size();
                    Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(), Determinism.ALWAYS);
                    projectedExpressions.add(literalNull);
                    allColumnsIndexes[pos] = column.getPosition();
                }
                // Swap select expression at pos with i
                Collections.swap(projectedExpressions, i, pos);
                // Swap column indexes and reverse column indexes too
                int tempPos = allColumnsIndexes[i];
                allColumnsIndexes[i] = allColumnsIndexes[pos];
                allColumnsIndexes[pos] = tempPos;
                reverseColumnIndexes[tempPos] = pos;
                reverseColumnIndexes[i] = i;
            }
            // If any pk slots are changing and server side UPSERT SELECT is disabled, do not run on server
            if (!serverUpsertSelectEnabled && ExpressionUtil.isPkPositionChanging(new TableRef(table), projectedExpressions)) {
                runOnServer = false;
            }
            /////////////////////////////////////////////////////////////////////
            if (runOnServer) {
                // Iterate through columns being projected
                List<PColumn> projectedColumns = Lists.newArrayListWithExpectedSize(projectedExpressions.size());
                int posOff = table.getBucketNum() != null ? 1 : 0;
                for (int i = 0; i < projectedExpressions.size(); i++) {
                    // Must make new column if position has changed
                    PColumn column = allColumns.get(allColumnsIndexes[i]);
                    projectedColumns.add(column.getPosition() == i + posOff ? column : new PColumnImpl(column, i + posOff));
                }
                // Build table from projectedColumns
                // Hack to add default column family to be used on server in case no value column is projected.
                PTable projectedTable = PTableImpl.makePTable(table, projectedColumns, PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(table)));
                SelectStatement select = SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
                StatementContext statementContext = queryPlan.getContext();
                RowProjector aggProjectorToBe = ProjectionCompiler.compile(statementContext, select, GroupBy.EMPTY_GROUP_BY);
                statementContext.getAggregationManager().compile(queryPlan.getContext(), GroupBy.EMPTY_GROUP_BY);
                if (queryPlan.getProjector().projectEveryRow()) {
                    aggProjectorToBe = new RowProjector(aggProjectorToBe, true);
                }
                final RowProjector aggProjector = aggProjectorToBe;
                /*
                     * Transfer over PTable representing subset of columns selected, but all PK columns.
                     * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
                     * Transfer over List<Expression> for projection.
                     * In the region scan, evaluate expressions in order, collecting the first n columns for the PK and the non-PK columns into the mutation Map.
                     * Create the PRow and get the mutations, adding them to the batch
                     */
                final StatementContext context = queryPlan.getContext();
                final Scan scan = context.getScan();
                // Propagate IGNORE_NEWER_MUTATIONS when replaying mutations since there will be
                // future dated data row mutations that will get in the way of generating the
                // correct index rows on replay.
                scan.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE, UngroupedAggregateRegionObserver.serialize(projectedTable));
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS, UngroupedAggregateRegionObserver.serialize(projectedExpressions));
                // Ignore order by - it has no impact
                final QueryPlan aggPlan = new AggregatePlan(context, select, statementContext.getCurrentTable(), aggProjector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                return new MutationPlan() {

                    @Override
                    public ParameterMetaData getParameterMetaData() {
                        return queryPlan.getContext().getBindManager().getParameterMetaData();
                    }

                    @Override
                    public StatementContext getContext() {
                        return queryPlan.getContext();
                    }

                    @Override
                    public TableRef getTargetRef() {
                        return tableRef;
                    }

                    @Override
                    public Set<TableRef> getSourceRefs() {
                        return originalQueryPlan.getSourceRefs();
                    }

                    @Override
                    public Operation getOperation() {
                        return operation;
                    }

                    @Override
                    public MutationState execute() throws SQLException {
                        ImmutableBytesWritable ptr = context.getTempPtr();
                        PTable table = tableRef.getTable();
                        table.getIndexMaintainers(ptr, context.getConnection());
                        byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
                        if (ptr.getLength() > 0) {
                            byte[] uuidValue = ServerCacheClient.generateId();
                            scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
                            scan.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long) aggProjector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            for (PTable index : getNewIndexes(table)) {
                                new MetaDataClient(connection).buildIndex(index, tableRef, scan.getTimeRange().getMax(), scan.getTimeRange().getMax() + 1);
                            }
                            return new MutationState(maxSize, maxSizeBytes, connection) {

                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    }

                    private List<PTable> getNewIndexes(PTable table) throws SQLException {
                        List<PTable> indexes = table.getIndexes();
                        List<PTable> newIndexes = new ArrayList<PTable>(2);
                        PTable newTable = PhoenixRuntime.getTableNoCache(connection, table.getName().getString());
                        for (PTable index : newTable.getIndexes()) {
                            if (!indexes.contains(index)) {
                                newIndexes.add(index);
                            }
                        }
                        return newIndexes;
                    }

                    @Override
                    public ExplainPlan getExplainPlan() throws SQLException {
                        List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                        List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                        planSteps.add("UPSERT ROWS");
                        planSteps.addAll(queryPlanSteps);
                        return new ExplainPlan(planSteps);
                    }

                    @Override
                    public Long getEstimatedRowsToScan() throws SQLException {
                        return aggPlan.getEstimatedRowsToScan();
                    }

                    @Override
                    public Long getEstimatedBytesToScan() throws SQLException {
                        return aggPlan.getEstimatedBytesToScan();
                    }
                };
            }
        }
        /////////////////////////////////////////////////////////////////////
        return new MutationPlan() {

            @Override
            public ParameterMetaData getParameterMetaData() {
                return queryPlan.getContext().getBindManager().getParameterMetaData();
            }

            @Override
            public StatementContext getContext() {
                return queryPlan.getContext();
            }

            @Override
            public TableRef getTargetRef() {
                return tableRef;
            }

            @Override
            public Set<TableRef> getSourceRefs() {
                return originalQueryPlan.getSourceRefs();
            }

            @Override
            public Operation getOperation() {
                return operation;
            }

            @Override
            public MutationState execute() throws SQLException {
                ResultIterator iterator = queryPlan.iterator();
                if (parallelIteratorFactory == null) {
                    return upsertSelect(new StatementContext(statement), tableRef, projector, iterator, columnIndexes, pkSlotIndexes, useServerTimestamp, false);
                }
                try {
                    parallelIteratorFactory.setRowProjector(projector);
                    parallelIteratorFactory.setColumnIndexes(columnIndexes);
                    parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
                    Tuple tuple;
                    long totalRowCount = 0;
                    StatementContext context = queryPlan.getContext();
                    while ((tuple = iterator.next()) != null) {
                        // Runs query
                        Cell kv = tuple.getValue(0);
                        totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
                    }
                    // Return total number of rows that have been updated. In the case of auto commit being off
                    // the mutations will all be in the mutation state of the current connection.
                    MutationState mutationState = new MutationState(maxSize, maxSizeBytes, statement.getConnection(), totalRowCount);
                    /*
                         *  All the metrics collected for measuring the reads done by the parallel mutating iterators
                         *  is included in the ReadMetricHolder of the statement context. Include these metrics in the
                         *  returned mutation state so they can be published on commit. 
                         */
                    mutationState.setReadMetricQueue(context.getReadMetricsQueue());
                    return mutationState;
                } finally {
                    iterator.close();
                }
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
                List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
                List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                planSteps.add("UPSERT SELECT");
                planSteps.addAll(queryPlanSteps);
                return new ExplainPlan(planSteps);
            }

            @Override
            public Long getEstimatedRowsToScan() throws SQLException {
                return queryPlan.getEstimatedRowsToScan();
            }

            @Override
            public Long getEstimatedBytesToScan() throws SQLException {
                return queryPlan.getEstimatedBytesToScan();
            }
        };
    }
    ////////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    /////////////////////////////////////////////////////////////////////
    final byte[][] values = new byte[nValuesToSet][];
    int nodeIndex = 0;
    if (isSharedViewIndex) {
        values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isTenantSpecific) {
        PName tenantId = connection.getTenantId();
        values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, isSharedViewIndex);
    }
    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size());
    // and initialize them in one batch
    for (ParseNode valueNode : valueNodes) {
        if (!valueNode.isStateless()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build().buildException();
        }
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        expressionBuilder.setColumn(column);
        Expression expression = valueNode.accept(expressionBuilder);
        if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) {
            throw TypeMismatchException.newException(expression.getDataType(), column.getDataType(), "expression: " + expression.toString() + " in column " + column);
        }
        constantExpressions.add(expression);
        nodeIndex++;
    }
    byte[] onDupKeyBytesToBe = null;
    List<Pair<ColumnName, ParseNode>> onDupKeyPairs = upsert.getOnDupKeyPairs();
    if (onDupKeyPairs != null) {
        if (table.isImmutableRows()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (table.isTransactional()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (connection.getSCN() != null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_SCN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (onDupKeyPairs.isEmpty()) {
            // ON DUPLICATE KEY IGNORE
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyIgnore();
        } else {
            // ON DUPLICATE KEY UPDATE
            int position = 1;
            UpdateColumnCompiler compiler = new UpdateColumnCompiler(context);
            int nColumns = onDupKeyPairs.size();
            List<Expression> updateExpressions = Lists.newArrayListWithExpectedSize(nColumns);
            LinkedHashSet<PColumn> updateColumns = Sets.newLinkedHashSetWithExpectedSize(nColumns + 1);
            // Use first PK column name as we know it won't conflict with others
            updateColumns.add(new PColumnImpl(table.getPKColumns().get(0).getName(), null, PVarbinary.INSTANCE,
                    null, null, false, 0, SortOrder.getDefault(), 0, null, false, null, false, false, null));
            for (Pair<ColumnName, ParseNode> columnPair : onDupKeyPairs) {
                ColumnName colName = columnPair.getFirst();
                PColumn updateColumn = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
                if (SchemaUtil.isPKColumn(updateColumn)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                final int columnPosition = position++;
                if (!updateColumns.add(new DelegateColumn(updateColumn) {

                    @Override
                    public int getPosition() {
                        return columnPosition;
                    }
                })) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                ParseNode updateNode = columnPair.getSecond();
                compiler.setColumn(updateColumn);
                Expression updateExpression = updateNode.accept(compiler);
                // Check that updateExpression is coercible to updateColumn
                if (updateExpression.getDataType() != null && !updateExpression.getDataType().isCastableTo(updateColumn.getDataType())) {
                    throw TypeMismatchException.newException(updateExpression.getDataType(), updateColumn.getDataType(), "expression: " + updateExpression.toString() + " for column " + updateColumn);
                }
                if (compiler.isAggregate()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                updateExpressions.add(updateExpression);
            }
            PTable onDupKeyTable = PTableImpl.makePTable(table, updateColumns);
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyUpdate(onDupKeyTable, updateExpressions);
        }
    }
    final byte[] onDupKeyBytes = onDupKeyBytesToBe;
    return new MutationPlan() {

        @Override
        public ParameterMetaData getParameterMetaData() {
            return context.getBindManager().getParameterMetaData();
        }

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public TableRef getTargetRef() {
            return tableRef;
        }

        @Override
        public Set<TableRef> getSourceRefs() {
            return Collections.emptySet();
        }

        @Override
        public Operation getOperation() {
            return operation;
        }

        @Override
        public MutationState execute() throws SQLException {
            ImmutableBytesWritable ptr = context.getTempPtr();
            final SequenceManager sequenceManager = context.getSequenceManager();
            // Next evaluate all the expressions
            int nodeIndex = nodeIndexOffset;
            PTable table = tableRef.getTable();
            Tuple tuple = sequenceManager.getSequenceCount() == 0 ? null : sequenceManager.newSequenceTuple(null);
            for (Expression constantExpression : constantExpressions) {
                PColumn column = allColumns.get(columnIndexes[nodeIndex]);
                constantExpression.evaluate(tuple, ptr);
                Object value = null;
                if (constantExpression.getDataType() != null) {
                    value = constantExpression.getDataType().toObject(ptr, constantExpression.getSortOrder(), constantExpression.getMaxLength(), constantExpression.getScale());
                    if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
                        throw TypeMismatchException.newException(constantExpression.getDataType(), column.getDataType(), "expression: " + constantExpression.toString() + " in column " + column);
                    }
                    if (!column.getDataType().isSizeCompatible(ptr, value, constantExpression.getDataType(), constantExpression.getSortOrder(), constantExpression.getMaxLength(), constantExpression.getScale(), column.getMaxLength(), column.getScale())) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString()).setMessage("value=" + constantExpression.toString()).build().buildException();
                    }
                }
                column.getDataType().coerceBytes(ptr, value, constantExpression.getDataType(), constantExpression.getMaxLength(), constantExpression.getScale(), constantExpression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                if (overlapViewColumns.contains(column) && Bytes.compareTo(ptr.get(), ptr.getOffset(), ptr.getLength(), column.getViewConstant(), 0, column.getViewConstant().length - 1) != 0) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN).setColumnName(column.getName().getString()).setMessage("value=" + constantExpression.toString()).build().buildException();
                }
                values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                nodeIndex++;
            }
            // Add columns based on view
            for (PColumn column : addViewColumns) {
                if (IndexUtil.getViewConstantValue(column, ptr)) {
                    values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                } else {
                    throw new IllegalStateException();
                }
            }
            Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(1);
            IndexMaintainer indexMaintainer = null;
            byte[][] viewConstants = null;
            if (table.getIndexType() == IndexType.LOCAL) {
                PTable parentTable = statement.getConnection().getMetaDataCache().getTableRef(new PTableKey(statement.getConnection().getTenantId(), table.getParentName().getString())).getTable();
                indexMaintainer = table.getIndexMaintainer(parentTable, connection);
                viewConstants = IndexUtil.getViewConstants(parentTable);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp, indexMaintainer, viewConstants, onDupKeyBytes, 0);
            return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
            if (context.getSequenceManager().getSequenceCount() > 0) {
                planSteps.add("CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
            }
            planSteps.add("PUT SINGLE ROW");
            return new ExplainPlan(planSteps);
        }

        @Override
        public Long getEstimatedRowsToScan() throws SQLException {
            return 0L;
        }

        @Override
        public Long getEstimatedBytesToScan() throws SQLException {
            return 0L;
        }
    };
}
Also used : PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) ArrayList(java.util.ArrayList) PTable(org.apache.phoenix.schema.PTable) DelegateColumn(org.apache.phoenix.schema.DelegateColumn) BindParseNode(org.apache.phoenix.parse.BindParseNode) SequenceValueParseNode(org.apache.phoenix.parse.SequenceValueParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Cell(org.apache.hadoop.hbase.Cell) MetaDataClient(org.apache.phoenix.schema.MetaDataClient) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) BitSet(java.util.BitSet) ResultIterator(org.apache.phoenix.iterate.ResultIterator) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) PLong(org.apache.phoenix.schema.types.PLong) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) MetaDataEntityNotFoundException(org.apache.phoenix.schema.MetaDataEntityNotFoundException) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) UpsertColumnsValuesMismatchException(org.apache.phoenix.schema.UpsertColumnsValuesMismatchException) Pair(org.apache.hadoop.hbase.util.Pair) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) PSmallint(org.apache.phoenix.schema.types.PSmallint) ColumnName(org.apache.phoenix.parse.ColumnName) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) PName(org.apache.phoenix.schema.PName) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef) Tuple(org.apache.phoenix.schema.tuple.Tuple) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
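For context on Example 2: when the conditions spelled out in the compile() comments hold (single source table or server UPSERT SELECT enabled, no aggregation, DISTINCT, LIMIT, or sequences, auto-commit on, non-transactional table without immutable-rows indexes), UpsertCompiler wraps the scan in an AggregatePlan so the writes happen server-side and only the mutated row count comes back. An illustrative JDBC-level sketch of a statement shaped to take that path; the connection URL and table/column names are placeholders, not part of the example above:

// Illustrative only: an UPSERT SELECT back into the same table, with auto-commit on,
// which is the shape of statement that can run through the server-side AggregatePlan path.
// Uses java.sql.Connection, DriverManager and Statement.
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    conn.setAutoCommit(true); // auto-commit is one of the preconditions for runOnServer
    try (Statement stmt = conn.createStatement()) {
        int updated = stmt.executeUpdate(
                "UPSERT INTO METRICS(ID, VAL) SELECT ID, VAL * 2 FROM METRICS");
        System.out.println("rows upserted: " + updated);
    }
}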

Example 3 with AggregatePlan

use of org.apache.phoenix.execute.AggregatePlan in project phoenix by apache.

the class QueryCompiler method compileSingleFlatQuery.

protected QueryPlan compileSingleFlatQuery(StatementContext context, SelectStatement select, List<Object> binds, boolean asSubquery, boolean allowPageFilter, QueryPlan innerPlan, TupleProjector innerPlanTupleProjector, boolean isInRowKeyOrder) throws SQLException {
    PTable projectedTable = null;
    if (this.projectTuples) {
        projectedTable = TupleProjectionCompiler.createProjectedTable(select, context);
        if (projectedTable != null) {
            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), select.getUdfParseNodes()));
        }
    }
    ColumnResolver resolver = context.getResolver();
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    ParseNode viewWhere = null;
    if (table.getViewStatement() != null) {
        viewWhere = new SQLParser(table.getViewStatement()).parseQuery().getWhere();
    }
    Integer limit = LimitCompiler.compile(context, select);
    Integer offset = OffsetCompiler.compile(context, select);
    GroupBy groupBy = GroupByCompiler.compile(context, select, isInRowKeyOrder);
    // Optimize the HAVING clause by finding any group by expressions that can be moved
    // to the WHERE clause
    select = HavingCompiler.rewrite(context, select, groupBy);
    Expression having = HavingCompiler.compile(context, select, groupBy);
    // Don't pass groupBy when building the WHERE clause expression, because we do not want to wrap
    // these expressions as group by key expressions since they're pre, not post filtered.
    if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) {
        context.setResolver(FromCompiler.getResolver(context.getConnection(), tableRef, select.getUdfParseNodes()));
    }
    Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode>newHashSet();
    Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
    // Recompile GROUP BY now that we've figured out our ScanRanges so we know
    // definitively whether or not we'll traverse in row key order.
    groupBy = groupBy.compile(context, innerPlanTupleProjector);
    // recover resolver
    context.setResolver(resolver);
    RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, asSubquery ? Collections.<PDatum>emptyList() : targetColumns, where);
    OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit, offset, projector, groupBy == GroupBy.EMPTY_GROUP_BY ? innerPlanTupleProjector : null, isInRowKeyOrder);
    context.getAggregationManager().compile(context, groupBy);
    // Final step is to build the query plan
    if (!asSubquery) {
        int maxRows = statement.getMaxRows();
        if (maxRows > 0) {
            if (limit != null) {
                limit = Math.min(limit, maxRows);
            } else {
                limit = maxRows;
            }
        }
    }
    if (projectedTable != null) {
        TupleProjector.serializeProjectorIntoScan(context.getScan(), new TupleProjector(projectedTable));
    }
    QueryPlan plan = innerPlan;
    if (plan == null) {
        ParallelIteratorFactory parallelIteratorFactory = asSubquery ? null : this.parallelIteratorFactory;
        plan = select.getFrom() == null ? new LiteralResultIterationPlan(context, select, tableRef, projector, limit, offset, orderBy, parallelIteratorFactory) : (select.isAggregate() || select.isDistinct() ? new AggregatePlan(context, select, tableRef, projector, limit, offset, orderBy, parallelIteratorFactory, groupBy, having) : new ScanPlan(context, select, tableRef, projector, limit, offset, orderBy, parallelIteratorFactory, allowPageFilter));
    }
    if (!subqueries.isEmpty()) {
        int count = subqueries.size();
        WhereClauseSubPlan[] subPlans = new WhereClauseSubPlan[count];
        int i = 0;
        for (SubqueryParseNode subqueryNode : subqueries) {
            SelectStatement stmt = subqueryNode.getSelectNode();
            subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt, false), stmt, subqueryNode.expectSingleRow());
        }
        plan = HashJoinPlan.create(select, plan, null, subPlans);
    }
    if (innerPlan != null) {
        if (LiteralExpression.isTrue(where)) {
            // we do not pass "true" as filter
            where = null;
        }
        plan = select.isAggregate() || select.isDistinct() ? new ClientAggregatePlan(context, select, tableRef, projector, limit, offset, where, orderBy, groupBy, having, plan) : new ClientScanPlan(context, select, tableRef, projector, limit, offset, where, orderBy, plan);
    }
    return plan;
}
Also used : TupleProjector(org.apache.phoenix.execute.TupleProjector) ClientAggregatePlan(org.apache.phoenix.execute.ClientAggregatePlan) PTable(org.apache.phoenix.schema.PTable) PDatum(org.apache.phoenix.schema.PDatum) SelectStatement(org.apache.phoenix.parse.SelectStatement) SubqueryParseNode(org.apache.phoenix.parse.SubqueryParseNode) SubqueryParseNode(org.apache.phoenix.parse.SubqueryParseNode) EqualParseNode(org.apache.phoenix.parse.EqualParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) ClientAggregatePlan(org.apache.phoenix.execute.ClientAggregatePlan) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) OrderBy(org.apache.phoenix.compile.OrderByCompiler.OrderBy) LiteralResultIterationPlan(org.apache.phoenix.execute.LiteralResultIterationPlan) ClientScanPlan(org.apache.phoenix.execute.ClientScanPlan) ScanPlan(org.apache.phoenix.execute.ScanPlan) GroupBy(org.apache.phoenix.compile.GroupByCompiler.GroupBy) WhereClauseSubPlan(org.apache.phoenix.execute.HashJoinPlan.WhereClauseSubPlan) ParallelIteratorFactory(org.apache.phoenix.iterate.ParallelIteratorFactory) Hint(org.apache.phoenix.parse.HintNode.Hint) ClientScanPlan(org.apache.phoenix.execute.ClientScanPlan) SQLParser(org.apache.phoenix.parse.SQLParser) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) RowValueConstructorExpression(org.apache.phoenix.expression.RowValueConstructorExpression) TableRef(org.apache.phoenix.schema.TableRef)
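
For orientation only (the JDBC URL and table/column names are assumptions, not part of the compiler code): at the plan-construction branch near the end of compileSingleFlatQuery, an aggregate or DISTINCT select is compiled into an AggregatePlan, while a plain projection without GROUP BY or DISTINCT becomes a ScanPlan. A minimal client-side sketch of the two paths:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class PlanSelectionSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // select.isAggregate() is true, so the compiler takes the AggregatePlan branch.
            stmt.executeQuery("SELECT COUNT(*) FROM MY_TABLE").close();
            // No aggregation or DISTINCT, so the compiler takes the ScanPlan branch.
            stmt.executeQuery("SELECT COL1 FROM MY_TABLE WHERE COL2 = 'x'").close();
        }
    }
}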

Example 4 with AggregatePlan

use of org.apache.phoenix.execute.AggregatePlan in project phoenix by apache.

the class HavingCompilerTest method compileStatement.

private static Expressions compileStatement(String query, List<Object> binds) throws SQLException {
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
    TestUtil.bindParams(pstmt, binds);
    QueryPlan plan = pstmt.compileQuery();
    assertTrue(plan instanceof AggregatePlan);
    Filter filter = plan.getContext().getScan().getFilter();
    assertTrue(filter == null || filter instanceof BooleanExpressionFilter);
    BooleanExpressionFilter boolFilter = (BooleanExpressionFilter) filter;
    AggregatePlan aggPlan = (AggregatePlan) plan;
    return new Expressions(boolFilter == null ? null : boolFilter.getExpression(), aggPlan.getHaving());
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) BooleanExpressionFilter(org.apache.phoenix.filter.BooleanExpressionFilter) Filter(org.apache.hadoop.hbase.filter.Filter) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) PhoenixPreparedStatement(org.apache.phoenix.jdbc.PhoenixPreparedStatement) BooleanExpressionFilter(org.apache.phoenix.filter.BooleanExpressionFilter)
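
A hedged usage sketch of the helper above, written as it might appear in the same test class (the atable table and a_string column are assumptions about the connectionless test schema; the extra imports would be java.util.Collections, org.junit.Test and org.junit.Assert.assertNotNull): a GROUP BY query with a HAVING clause on an aggregate should compile to an AggregatePlan, so the returned Expressions carries the compiled HAVING expression alongside any scan filter.

@Test
public void havingOnAggregateCompilesToAggregatePlan() throws Exception {
    // Table and column names are assumed; the assertion inside compileStatement()
    // already verifies that the compiled plan is an AggregatePlan.
    Expressions exprs = compileStatement(
            "SELECT a_string, COUNT(1) FROM atable GROUP BY a_string HAVING COUNT(1) >= 2",
            Collections.<Object>emptyList());
    assertNotNull(exprs);
}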

Example 5 with AggregatePlan

use of org.apache.phoenix.execute.AggregatePlan in project phoenix by apache.

the class DeleteCompiler method compile.

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasLimit = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    boolean retryOnce = !isAutoCommit;
    TableRef tableRefToBe;
    boolean noQueryReqd = false;
    boolean runOnServer = false;
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
    DeletingParallelIteratorFactory parallelIteratorFactory;
    QueryPlan dataPlanToBe = null;
    while (true) {
        try {
            resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
            tableRefToBe = resolverToBe.getTables().get(0);
            PTable table = tableRefToBe.getTable();
            // TODO: SchemaUtil.isReadOnly(PTable, connection)?
            if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
                throw new ReadOnlyTableException(schemaName, tableName);
            } else if (table.isTransactional() && connection.getSCN() != null) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
            noQueryReqd = !hasLimit;
            // Can't run on same server for transactional data, as we need the row keys for the data
            // that is being upserted for conflict detection purposes.
            runOnServer = isAutoCommit && noQueryReqd && !table.isTransactional();
            HintNode hint = delete.getHint();
            if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
                hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
            }
            List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
            boolean isSalted = table.getBucketNum() != null;
            boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
            boolean isSharedViewIndex = table.getViewIndexId() != null;
            for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); i < table.getPKColumns().size(); i++) {
                PColumn column = table.getPKColumns().get(i);
                aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
            }
            select = FACTORY.select(delete.getTable(), hint, false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
            select = StatementNormalizer.normalize(select, resolverToBe);
            SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
            if (transformedSelect != select) {
                resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
                select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
            }
            parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection);
            QueryOptimizer optimizer = new QueryOptimizer(services);
            QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory, new SequenceManager(statement));
            dataPlanToBe = compiler.compile();
            queryPlans = Lists.newArrayList(mayHaveImmutableIndexes ? optimizer.getApplicablePlans(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory) : optimizer.getBestPlan(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory));
            if (mayHaveImmutableIndexes) {
                // FIXME: this is ugly
                // Lookup the table being deleted from in the cache, as it's possible that the
                // optimizer updated the cache if it found indexes that were out of date.
                // If the index was marked as disabled, it should not be in the list
                // of immutable indexes.
                table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
                tableRefToBe.setTable(table);
                immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            }
        } catch (MetaDataEntityNotFoundException e) {
            // Our cached metadata may be out of date; update the cache once and retry if it changed.
            // Otherwise throw, as we'll just get the same error next time.
            if (retryOnce) {
                retryOnce = false;
                MetaDataMutationResult result = new MetaDataClient(connection).updateCache(schemaName, tableName);
                if (result.wasUpdated()) {
                    continue;
                }
            }
            throw e;
        }
        break;
    }
    boolean isBuildingImmutable = false;
    final boolean hasImmutableIndexes = !immutableIndex.isEmpty();
    if (hasImmutableIndexes) {
        for (PTable index : immutableIndex.values()) {
            if (index.getIndexState() == PIndexState.BUILDING) {
                isBuildingImmutable = true;
                break;
            }
        }
    }
    final QueryPlan dataPlan = dataPlanToBe;
    // tableRefs is parallel with queryPlans
    TableRef[] tableRefs = new TableRef[hasImmutableIndexes ? immutableIndex.size() : 1];
    if (hasImmutableIndexes) {
        int i = 0;
        Iterator<QueryPlan> plans = queryPlans.iterator();
        while (plans.hasNext()) {
            QueryPlan plan = plans.next();
            PTable table = plan.getTableRef().getTable();
            if (table.getType() == PTableType.INDEX) {
                // index plans
                tableRefs[i++] = plan.getTableRef();
                immutableIndex.remove(table.getKey());
            } else if (!isBuildingImmutable) {
                // data plan
                /*
                     * If we have immutable indexes that we need to maintain, don't execute the data plan
                     * as we can save a query by piggy-backing on any of the other index queries, since the
                     * PK columns that we need are always in each index row.
                     */
                plans.remove();
            }
        }
        /*
             * If we have any immutable indexes remaining, then that means that the plan for that index got filtered out
             * because it could not be executed. This would occur if a column in the where clause is not found in the
             * immutable index.
             */
        if (!immutableIndex.isEmpty()) {
            Collection<PTable> immutableIndexes = immutableIndex.values();
            if (!isBuildingImmutable || hasNonPKIndexedColumns(immutableIndexes)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS).setSchemaName(tableRefToBe.getTable().getSchemaName().getString()).setTableName(tableRefToBe.getTable().getTableName().getString()).build().buildException();
            }
            runOnServer = false;
        }
    }
    List<TableRef> buildingImmutableIndexes = Lists.newArrayListWithExpectedSize(immutableIndex.values().size());
    for (PTable index : immutableIndex.values()) {
        buildingImmutableIndexes.add(new TableRef(index, dataPlan.getTableRef().getTimeStamp(), dataPlan.getTableRef().getLowerBoundTimeStamp()));
    }
    // Make sure the first plan is targeting deletion from the data table
    // In the case of an immutable index, we'll also delete from the index.
    final TableRef dataTableRef = tableRefs[0] = tableRefToBe;
    /*
         * Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
         * from the data table, while the others will be for deleting rows from immutable indexes.
         */
    List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(tableRefs.length);
    for (int i = 0; i < tableRefs.length; i++) {
        final TableRef tableRef = tableRefs[i];
        final QueryPlan plan = queryPlans.get(i);
        if (!plan.getTableRef().equals(tableRef) || !(plan instanceof BaseQueryPlan)) {
            runOnServer = false;
            // FIXME: why set this to false in this case?
            noQueryReqd = false;
        }
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
        final StatementContext context = plan.getContext();
        // Checking for the absence of a WHERE clause in the parse node is not sufficient, as the filter
        // may have been optimized out. Instead, we check that there's at most a single SkipScanFilter
        // and that the scan ranges resolve to a point lookup.
        if (noQueryReqd && (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup()) {
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    // We have a point lookup, so we know we have a simple set of fully qualified
                    // keys for our ranges
                    ScanRanges ranges = context.getScanRanges();
                    Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
                    Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
                    while (iterator.hasNext()) {
                        mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
                    }
                    return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    // Don't include the target
                    return Collections.emptySet();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return 0L;
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return 0L;
                }
            });
        } else if (runOnServer) {
            // TODO: better abstraction
            Scan scan = context.getScan();
            // Propagate IGNORE_NEWER_MUTATIONS when replaying mutations since there will be
            // future dated data row mutations that will get in the way of generating the
            // correct index rows on replay.
            scan.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
            scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
            // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
            // The coprocessor will delete each row returned from the scan
            // Ignoring ORDER BY, since with auto commit on and no LIMIT it makes no difference
            SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
            RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
            context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
            if (plan.getProjector().projectEveryRow()) {
                projectorToBe = new RowProjector(projectorToBe, true);
            }
            final RowProjector projector = projectorToBe;
            final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    // TODO: share this block of code with UPSERT SELECT
                    ImmutableBytesWritable ptr = context.getTempPtr();
                    PTable table = tableRef.getTable();
                    table.getIndexMaintainers(ptr, context.getConnection());
                    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
                    ServerCache cache = null;
                    try {
                        if (ptr.getLength() > 0) {
                            byte[] uuidValue = ServerCacheClient.generateId();
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
                            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            return new MutationState(maxSize, maxSizeBytes, connection) {

                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return aggPlan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return aggPlan.getEstimatedBytesToScan();
                }
            });
        } else {
            List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
            if (!buildingImmutableIndexes.isEmpty()) {
                immutableIndexRefsToBe = buildingImmutableIndexes;
            } else if (hasImmutableIndexes && !plan.getTableRef().equals(tableRef)) {
                immutableIndexRefsToBe = Collections.singletonList(plan.getTableRef());
            }
            final List<TableRef> immutableIndexRefs = immutableIndexRefsToBe;
            final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    ResultIterator iterator = plan.iterator();
                    try {
                        if (!hasLimit) {
                            Tuple tuple;
                            long totalRowCount = 0;
                            if (parallelIteratorFactory2 != null) {
                                parallelIteratorFactory2.setRowProjector(plan.getProjector());
                                parallelIteratorFactory2.setTargetTableRef(tableRef);
                                parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
                                parallelIteratorFactory2.setIndexTargetTableRefs(immutableIndexRefs);
                            }
                            while ((tuple = iterator.next()) != null) {
                                // Runs query
                                Cell kv = tuple.getValue(0);
                                totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
                            }
                            // Return the total number of rows that have been deleted. In the case of auto commit being off,
                            // the mutations will all be in the mutation state of the current connection.
                            MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
                            // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
                            state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
                            return state;
                        } else {
                            return deleteRows(plan.getContext(), tableRef, immutableIndexRefs, iterator, plan.getProjector(), plan.getTableRef());
                        }
                    } finally {
                        iterator.close();
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = plan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return plan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return plan.getEstimatedBytesToScan();
                }
            });
        }
    }
    return mutationPlans.size() == 1 ? mutationPlans.get(0) : new MultiDeleteMutationPlan(mutationPlans);
}
Also used : PTable(org.apache.phoenix.schema.PTable) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Cell(org.apache.hadoop.hbase.Cell) MetaDataClient(org.apache.phoenix.schema.MetaDataClient) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) AliasedNode(org.apache.phoenix.parse.AliasedNode) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) PLong(org.apache.phoenix.schema.types.PLong) Scan(org.apache.hadoop.hbase.client.Scan) Map(java.util.Map) HashMap(java.util.HashMap) ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Set(java.util.Set) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) MetaDataEntityNotFoundException(org.apache.phoenix.schema.MetaDataEntityNotFoundException) ResultIterator(org.apache.phoenix.iterate.ResultIterator) Iterator(java.util.Iterator) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) Tuple(org.apache.phoenix.schema.tuple.Tuple) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
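
From the client's perspective, only as a hedged sketch (the JDBC URL and table/column names are assumptions): with auto commit enabled, no LIMIT and a non-transactional table, the runOnServer branch above turns the DELETE into a server-side SELECT COUNT(1) executed through an AggregatePlan whose scan carries the DELETE_AGG attribute, so matching rows are deleted by the region observer as they are counted.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ServerSideDeleteSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.setAutoCommit(true); // a precondition for the runOnServer delete path
            try (Statement stmt = conn.createStatement()) {
                // No LIMIT, so the delete is eligible for the AggregatePlan-backed server-side path.
                int deleted = stmt.executeUpdate("DELETE FROM MY_TABLE WHERE COL2 = 'x'");
                System.out.println("rows deleted: " + deleted);
            }
        }
    }
}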

Aggregations

AggregatePlan (org.apache.phoenix.execute.AggregatePlan) 6
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 4
SelectStatement (org.apache.phoenix.parse.SelectStatement) 4
TableRef (org.apache.phoenix.schema.TableRef) 4
Scan (org.apache.hadoop.hbase.client.Scan) 3
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable) 3
MutationState (org.apache.phoenix.execute.MutationState) 3
Hint (org.apache.phoenix.parse.HintNode.Hint) 3
ParseNode (org.apache.phoenix.parse.ParseNode) 3
SQLException (java.sql.SQLException) 2
List (java.util.List) 2
Cell (org.apache.hadoop.hbase.Cell) 2
ServerCache (org.apache.phoenix.cache.ServerCacheClient.ServerCache) 2
OrderBy (org.apache.phoenix.compile.OrderByCompiler.OrderBy) 2
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo) 2
ClientScanPlan (org.apache.phoenix.execute.ClientScanPlan) 2
RowMutationState (org.apache.phoenix.execute.MutationState.RowMutationState) 2
ScanPlan (org.apache.phoenix.execute.ScanPlan) 2
Expression (org.apache.phoenix.expression.Expression) 2
LiteralExpression (org.apache.phoenix.expression.LiteralExpression) 2