Example 26 with MutationState

Use of org.apache.phoenix.execute.MutationState in project phoenix by apache.

From the class MetaDataClient, method createFunction.

public MutationState createFunction(CreateFunctionStatement stmt) throws SQLException {
    boolean wasAutoCommit = connection.getAutoCommit();
    connection.rollback();
    try {
        PFunction function = new PFunction(stmt.getFunctionInfo(), stmt.isTemporary(), stmt.isReplace());
        connection.setAutoCommit(false);
        String tenantIdStr = connection.getTenantId() == null ? null : connection.getTenantId().getString();
        List<Mutation> functionData = Lists.newArrayListWithExpectedSize(function.getFunctionArguments().size() + 1);
        List<FunctionArgument> args = function.getFunctionArguments();
        try (PreparedStatement argUpsert = connection.prepareStatement(INSERT_FUNCTION_ARGUMENT)) {
            for (int i = 0; i < args.size(); i++) {
                FunctionArgument arg = args.get(i);
                addFunctionArgMutation(function.getFunctionName(), arg, argUpsert, i);
            }
            functionData.addAll(connection.getMutationState().toMutations().next().getSecond());
            connection.rollback();
        }
        try (PreparedStatement functionUpsert = connection.prepareStatement(CREATE_FUNCTION)) {
            functionUpsert.setString(1, tenantIdStr);
            functionUpsert.setString(2, function.getFunctionName());
            functionUpsert.setInt(3, function.getFunctionArguments().size());
            functionUpsert.setString(4, function.getClassName());
            functionUpsert.setString(5, function.getJarPath());
            functionUpsert.setString(6, function.getReturnType());
            functionUpsert.execute();
            functionData.addAll(connection.getMutationState().toMutations(null).next().getSecond());
            connection.rollback();
        }
        MetaDataMutationResult result = connection.getQueryServices().createFunction(functionData, function, stmt.isTemporary());
        MutationCode code = result.getMutationCode();
        switch(code) {
            case FUNCTION_ALREADY_EXISTS:
                if (!function.isReplace()) {
                    throw new FunctionAlreadyExistsException(function.getFunctionName(), result.getFunctions().get(0));
                } else {
                    connection.removeFunction(function.getTenantId(), function.getFunctionName(), result.getMutationTime());
                    addFunctionToCache(result);
                }
            case NEWER_FUNCTION_FOUND:
                // Add the function to the ConnectionQueryServices cache, but don't add
                // it to this connection as we can't see it.
                throw new NewerFunctionAlreadyExistsException(function.getFunctionName(), result.getFunctions().get(0));
            default:
                List<PFunction> functions = new ArrayList<PFunction>(1);
                functions.add(function);
                result = new MetaDataMutationResult(code, result.getMutationTime(), functions, true);
                if (function.isReplace()) {
                    connection.removeFunction(function.getTenantId(), function.getFunctionName(), result.getMutationTime());
                }
                addFunctionToCache(result);
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
    return new MutationState(1, 1000, connection);
}
Also used : PFunction(org.apache.phoenix.parse.PFunction) ArrayList(java.util.ArrayList) PreparedStatement(java.sql.PreparedStatement) IndexKeyConstraint(org.apache.phoenix.parse.IndexKeyConstraint) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) ColumnDefInPkConstraint(org.apache.phoenix.parse.ColumnDefInPkConstraint) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) MutationState(org.apache.phoenix.execute.MutationState) Mutation(org.apache.hadoop.hbase.client.Mutation) FunctionArgument(org.apache.phoenix.parse.PFunction.FunctionArgument) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
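
A minimal client-side sketch, for context: executing a CREATE FUNCTION statement over a Phoenix JDBC connection is what routes into MetaDataClient.createFunction above. The connection URL, function name, implementing class, and jar path below are illustrative assumptions (user-defined functions also require phoenix.functions.allowUserDefinedFunctions to be enabled on the cluster).

import java.sql.Connection;
import java.sql.DriverManager;

public class CreateFunctionSketch {
    public static void main(String[] args) throws Exception {
        // Assumed URL and UDF details; adjust for a real cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Compiled into a CreateFunctionStatement and handled by MetaDataClient.createFunction,
            // which upserts the function metadata rows shown in the example above.
            conn.createStatement().execute(
                "CREATE FUNCTION my_reverse(varchar) RETURNS varchar"
                + " AS 'com.example.udf.ReverseFunction'"
                + " USING JAR 'hdfs:/tmp/udfs/my-udf.jar'");
        }
    }
}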

Example 27 with MutationState

Use of org.apache.phoenix.execute.MutationState in project phoenix by apache.

From the class TxCheckpointIT, method upsertRows.

private void upsertRows(Connection conn, String fullTableName) throws SQLException {
    ResultSet rs;
    MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState();
    state.startTransaction();
    long wp = state.getWritePointer();
    conn.createStatement().execute("upsert into " + fullTableName + " select max(id)+1, 'a4', 'b4' from " + fullTableName + "");
    assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
    // Make sure write ptr didn't move
    assertEquals(wp, state.getWritePointer());
    rs = conn.createStatement().executeQuery("select max(id) from " + fullTableName + "");
    assertTrue(rs.next());
    assertEquals(4, rs.getLong(1));
    assertFalse(rs.next());
    conn.createStatement().execute("upsert into " + fullTableName + " select max(id)+1, 'a5', 'b5' from " + fullTableName + "");
    assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
    // Make sure write ptr moves
    assertNotEquals(wp, state.getWritePointer());
    wp = state.getWritePointer();
    rs = conn.createStatement().executeQuery("select max(id) from " + fullTableName + "");
    assertTrue(rs.next());
    assertEquals(5, rs.getLong(1));
    assertFalse(rs.next());
    conn.createStatement().execute("upsert into " + fullTableName + " select max(id)+1, 'a6', 'b6' from " + fullTableName + "");
    assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
    // Make sure write ptr moves
    assertNotEquals(wp, state.getWritePointer());
    wp = state.getWritePointer();
    rs = conn.createStatement().executeQuery("select max(id) from " + fullTableName + "");
    assertTrue(rs.next());
    assertEquals(6, rs.getLong(1));
    assertFalse(rs.next());
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) MutationState(org.apache.phoenix.execute.MutationState) ResultSet(java.sql.ResultSet)
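
A minimal sketch of the setup this helper assumes: a transactional table seeded with rows 1..3, so the first "select max(id)+1" upsert writes id 4 as asserted above. The table schema, seed values, and connection URL are assumptions for illustration (the real test creates its tables in its own setup), and transactions must be enabled on the cluster.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

class TxCheckpointSetupSketch {
    // Creates an (assumed) transactional table and seeds rows 1..3.
    static void createAndSeed(String url, String fullTableName) throws SQLException {
        try (Connection conn = DriverManager.getConnection(url)) {
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS " + fullTableName
                + " (ID BIGINT NOT NULL PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
            for (int i = 1; i <= 3; i++) {
                conn.createStatement().execute(
                    "UPSERT INTO " + fullTableName + " VALUES(" + i + ", 'a" + i + "', 'b" + i + "')");
            }
            conn.commit();
        }
    }
}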

Example 28 with MutationState

Use of org.apache.phoenix.execute.MutationState in project phoenix by apache.

From the class DeleteCompiler, method compile.

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasLimit = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    boolean retryOnce = !isAutoCommit;
    TableRef tableRefToBe;
    boolean noQueryReqd = false;
    boolean runOnServer = false;
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
    DeletingParallelIteratorFactory parallelIteratorFactory;
    QueryPlan dataPlanToBe = null;
    while (true) {
        try {
            resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
            tableRefToBe = resolverToBe.getTables().get(0);
            PTable table = tableRefToBe.getTable();
            // TODO: SchemaUtil.isReadOnly(PTable, connection)?
            if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
                throw new ReadOnlyTableException(schemaName, tableName);
            } else if (table.isTransactional() && connection.getSCN() != null) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
            noQueryReqd = !hasLimit;
            // Can't run on same server for transactional data, as we need the row keys for the data
            // that is being upserted for conflict detection purposes.
            runOnServer = isAutoCommit && noQueryReqd && !table.isTransactional();
            HintNode hint = delete.getHint();
            if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
                hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
            }
            List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
            boolean isSalted = table.getBucketNum() != null;
            boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
            boolean isSharedViewIndex = table.getViewIndexId() != null;
            for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); i < table.getPKColumns().size(); i++) {
                PColumn column = table.getPKColumns().get(i);
                aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
            }
            select = FACTORY.select(delete.getTable(), hint, false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
            select = StatementNormalizer.normalize(select, resolverToBe);
            SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
            if (transformedSelect != select) {
                resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
                select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
            }
            parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection);
            QueryOptimizer optimizer = new QueryOptimizer(services);
            QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory, new SequenceManager(statement));
            dataPlanToBe = compiler.compile();
            queryPlans = Lists.newArrayList(mayHaveImmutableIndexes ? optimizer.getApplicablePlans(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory) : optimizer.getBestPlan(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory));
            if (mayHaveImmutableIndexes) {
                // FIXME: this is ugly
                // Lookup the table being deleted from in the cache, as it's possible that the
                // optimizer updated the cache if it found indexes that were out of date.
                // If the index was marked as disabled, it should not be in the list
                // of immutable indexes.
                table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
                tableRefToBe.setTable(table);
                immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            }
        } catch (MetaDataEntityNotFoundException e) {
            // Our metadata may be out of date; update the cache once and retry if it was stale.
            // Otherwise throw, as we'll just get the same error next time.
            if (retryOnce) {
                retryOnce = false;
                MetaDataMutationResult result = new MetaDataClient(connection).updateCache(schemaName, tableName);
                if (result.wasUpdated()) {
                    continue;
                }
            }
            throw e;
        }
        break;
    }
    boolean isBuildingImmutable = false;
    final boolean hasImmutableIndexes = !immutableIndex.isEmpty();
    if (hasImmutableIndexes) {
        for (PTable index : immutableIndex.values()) {
            if (index.getIndexState() == PIndexState.BUILDING) {
                isBuildingImmutable = true;
                break;
            }
        }
    }
    final QueryPlan dataPlan = dataPlanToBe;
    // tableRefs is parallel with queryPlans
    TableRef[] tableRefs = new TableRef[hasImmutableIndexes ? immutableIndex.size() : 1];
    if (hasImmutableIndexes) {
        int i = 0;
        Iterator<QueryPlan> plans = queryPlans.iterator();
        while (plans.hasNext()) {
            QueryPlan plan = plans.next();
            PTable table = plan.getTableRef().getTable();
            if (table.getType() == PTableType.INDEX) {
                // index plans
                tableRefs[i++] = plan.getTableRef();
                immutableIndex.remove(table.getKey());
            } else if (!isBuildingImmutable) {
                // data plan
                /*
                     * If we have immutable indexes that we need to maintain, don't execute the data plan
                     * as we can save a query by piggy-backing on any of the other index queries, since the
                     * PK columns that we need are always in each index row.
                     */
                plans.remove();
            }
        }
        /*
             * If we have any immutable indexes remaining, then that means that the plan for that index got filtered out
             * because it could not be executed. This would occur if a column in the where clause is not found in the
             * immutable index.
             */
        if (!immutableIndex.isEmpty()) {
            Collection<PTable> immutableIndexes = immutableIndex.values();
            if (!isBuildingImmutable || hasNonPKIndexedColumns(immutableIndexes)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS).setSchemaName(tableRefToBe.getTable().getSchemaName().getString()).setTableName(tableRefToBe.getTable().getTableName().getString()).build().buildException();
            }
            runOnServer = false;
        }
    }
    List<TableRef> buildingImmutableIndexes = Lists.newArrayListWithExpectedSize(immutableIndex.values().size());
    for (PTable index : immutableIndex.values()) {
        buildingImmutableIndexes.add(new TableRef(index, dataPlan.getTableRef().getTimeStamp(), dataPlan.getTableRef().getLowerBoundTimeStamp()));
    }
    // Make sure the first plan is targeting deletion from the data table
    // In the case of an immutable index, we'll also delete from the index.
    final TableRef dataTableRef = tableRefs[0] = tableRefToBe;
    /*
         * Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
         * from the data table, while the others will be for deleting rows from immutable indexes.
         */
    List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(tableRefs.length);
    for (int i = 0; i < tableRefs.length; i++) {
        final TableRef tableRef = tableRefs[i];
        final QueryPlan plan = queryPlans.get(i);
        if (!plan.getTableRef().equals(tableRef) || !(plan instanceof BaseQueryPlan)) {
            runOnServer = false;
            // FIXME: why set this to false in this case?
            noQueryReqd = false;
        }
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
        final StatementContext context = plan.getContext();
        // Checking the parse node for the absence of a WHERE clause isn't sufficient, as the where clause
        // may have been optimized out. Instead, we check that there's a single SkipScanFilter and that the
        // scan ranges are point lookups.
        if (noQueryReqd && (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup()) {
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    // We have a point lookup, so we know we have a simple set of fully qualified
                    // keys for our ranges
                    ScanRanges ranges = context.getScanRanges();
                    Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
                    Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
                    while (iterator.hasNext()) {
                        mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
                    }
                    return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    // Don't include the target
                    return Collections.emptySet();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return 0l;
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return 0l;
                }
            });
        } else if (runOnServer) {
            // TODO: better abstraction
            Scan scan = context.getScan();
            // Propagate IGNORE_NEWER_MUTATIONS when replaying mutations since there will be
            // future dated data row mutations that will get in the way of generating the
            // correct index rows on replay.
            scan.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
            scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
            // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
            // The coprocessor will delete each row returned from the scan
            // Ignoring ORDER BY, since with auto commit on and no limit it makes no difference
            SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
            RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
            context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
            if (plan.getProjector().projectEveryRow()) {
                projectorToBe = new RowProjector(projectorToBe, true);
            }
            final RowProjector projector = projectorToBe;
            final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    // TODO: share this block of code with UPSERT SELECT
                    ImmutableBytesWritable ptr = context.getTempPtr();
                    PTable table = tableRef.getTable();
                    table.getIndexMaintainers(ptr, context.getConnection());
                    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
                    ServerCache cache = null;
                    try {
                        if (ptr.getLength() > 0) {
                            byte[] uuidValue = ServerCacheClient.generateId();
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
                            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            return new MutationState(maxSize, maxSizeBytes, connection) {

                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return aggPlan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return aggPlan.getEstimatedBytesToScan();
                }
            });
        } else {
            List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
            if (!buildingImmutableIndexes.isEmpty()) {
                immutableIndexRefsToBe = buildingImmutableIndexes;
            } else if (hasImmutableIndexes && !plan.getTableRef().equals(tableRef)) {
                immutableIndexRefsToBe = Collections.singletonList(plan.getTableRef());
            }
            final List<TableRef> immutableIndexRefs = immutableIndexRefsToBe;
            final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    ResultIterator iterator = plan.iterator();
                    try {
                        if (!hasLimit) {
                            Tuple tuple;
                            long totalRowCount = 0;
                            if (parallelIteratorFactory2 != null) {
                                parallelIteratorFactory2.setRowProjector(plan.getProjector());
                                parallelIteratorFactory2.setTargetTableRef(tableRef);
                                parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
                                parallelIteratorFactory2.setIndexTargetTableRefs(immutableIndexRefs);
                            }
                            while ((tuple = iterator.next()) != null) {
                                // Runs query
                                Cell kv = tuple.getValue(0);
                                totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
                            }
                            // Return the total number of rows that have been deleted. When auto commit is off,
                            // the mutations will all be in the mutation state of the current connection.
                            MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
                            // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
                            state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
                            return state;
                        } else {
                            return deleteRows(plan.getContext(), tableRef, immutableIndexRefs, iterator, plan.getProjector(), plan.getTableRef());
                        }
                    } finally {
                        iterator.close();
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = plan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return plan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return plan.getEstimatedBytesToScan();
                }
            });
        }
    }
    return mutationPlans.size() == 1 ? mutationPlans.get(0) : new MultiDeleteMutationPlan(mutationPlans);
}
Also used : PTable(org.apache.phoenix.schema.PTable) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Cell(org.apache.hadoop.hbase.Cell) MetaDataClient(org.apache.phoenix.schema.MetaDataClient) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) AliasedNode(org.apache.phoenix.parse.AliasedNode) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) PLong(org.apache.phoenix.schema.types.PLong) Scan(org.apache.hadoop.hbase.client.Scan) Map(java.util.Map) HashMap(java.util.HashMap) ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Set(java.util.Set) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) MetaDataEntityNotFoundException(org.apache.phoenix.schema.MetaDataEntityNotFoundException) ResultIterator(org.apache.phoenix.iterate.ResultIterator) Iterator(java.util.Iterator) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) Tuple(org.apache.phoenix.schema.tuple.Tuple) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
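
For orientation, a hedged sketch of the client-side calls that exercise this compiler. Which of the three plans above is chosen depends on auto-commit, LIMIT, filters, and immutable indexes; the table and column names here are illustrative assumptions.

import java.sql.Connection;
import java.sql.DriverManager;

class DeleteSketch {
    static void deleteRows(String url) throws Exception {
        try (Connection conn = DriverManager.getConnection(url)) {
            // With auto-commit on and no LIMIT, DeleteCompiler may pick the server-side
            // aggregate plan: the coprocessor deletes rows as the COUNT(*) scan runs.
            conn.setAutoCommit(true);
            int deleted = conn.createStatement().executeUpdate(
                "DELETE FROM MY_TABLE WHERE CREATED_DATE < TO_DATE('2017-01-01')");
            System.out.println("rows deleted: " + deleted);

            // With auto-commit off, the deletes are buffered in the connection's
            // MutationState and are only sent to the server on commit().
            conn.setAutoCommit(false);
            conn.createStatement().executeUpdate("DELETE FROM MY_TABLE WHERE ID = 42");
            conn.commit();
        }
    }
}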

Example 29 with MutationState

Use of org.apache.phoenix.execute.MutationState in project phoenix by apache.

From the class MutatingParallelIteratorFactory, method newIterator.

@Override
public PeekingResultIterator newIterator(final StatementContext parentContext, ResultIterator iterator, Scan scan, String tableName, QueryPlan plan) throws SQLException {
    final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection);
    MutationState state = mutate(parentContext, iterator, clonedConnection);
    long totalRowCount = state.getUpdateCount();
    if (clonedConnection.getAutoCommit()) {
        clonedConnection.getMutationState().join(state);
        state = clonedConnection.getMutationState();
    }
    final MutationState finalState = state;
    byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
    KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    final Tuple tuple = new SingleKeyValueTuple(keyValue);
    return new PeekingResultIterator() {

        private boolean done = false;

        @Override
        public Tuple next() throws SQLException {
            if (done) {
                return null;
            }
            done = true;
            return tuple;
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        @Override
        public void close() throws SQLException {
            try {
                /* 
                     * Join the child mutation states in close, since this is called in a single threaded manner
                     * after the parallel results have been processed. 
                     * If auto-commit is on for the cloned child connection, then the finalState here is an empty mutation 
                     * state (with no mutations). However, it still has the metrics for mutation work done by the 
                     * mutating-iterator. Joining the mutation state makes sure those metrics are passed over
                     * to the parent connection.
                     */
                MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
            } finally {
                clonedConnection.close();
            }
        }

        @Override
        public Tuple peek() throws SQLException {
            return done ? null : tuple;
        }
    };
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) KeyValue(org.apache.hadoop.hbase.KeyValue) MutationState(org.apache.phoenix.execute.MutationState) SingleKeyValueTuple(org.apache.phoenix.schema.tuple.SingleKeyValueTuple) List(java.util.List) Tuple(org.apache.phoenix.schema.tuple.Tuple) SingleKeyValueTuple(org.apache.phoenix.schema.tuple.SingleKeyValueTuple) PeekingResultIterator(org.apache.phoenix.iterate.PeekingResultIterator)
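
A sketch of how a caller might consume the single-tuple iterator returned above; all parameters are assumed handles supplied by the caller. The iterator yields exactly one tuple whose only cell encodes the mutation count as a PLong, and close() joins the child MutationState back into the parent connection's state.

// Decodes the mutation count from the iterator produced by newIterator above.
static long readMutationCount(MutatingParallelIteratorFactory factory, StatementContext parentContext,
        ResultIterator sourceIterator, Scan scan, String tableName, QueryPlan plan) throws SQLException {
    PeekingResultIterator it = factory.newIterator(parentContext, sourceIterator, scan, tableName, plan);
    try {
        Tuple row = it.next();                   // the single count-carrying tuple
        Cell kv = row.getValue(0);
        return PLong.INSTANCE.getCodec()
                .decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
    } finally {
        it.close();                              // joins the child MutationState into the parent connection
    }
}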

Example 30 with MutationState

Use of org.apache.phoenix.execute.MutationState in project phoenix by apache.

From the class PostLocalIndexDDLCompiler, method compile.

public MutationPlan compile(PTable index) throws SQLException {
    try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
        String query = "SELECT count(*) FROM " + tableName;
        final QueryPlan plan = statement.compileQuery(query);
        TableRef tableRef = plan.getTableRef();
        Scan scan = plan.getContext().getScan();
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        final PTable dataTable = tableRef.getTable();
        List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
        for (PTable indexTable : dataTable.getIndexes()) {
            if (indexTable.getKey().equals(index.getKey())) {
                index = indexTable;
                break;
            }
        }
        // Only build newly created index.
        indexes.add(index);
        IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection());
        // Set attribute on scan that UngroupedAggregateRegionObserver will switch on.
        // We'll detect that this attribute was set the server-side and write the index
        // rows per region as a result. The value of the attribute will be our persisted
        // index maintainers.
        // Define the LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver
        scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
        // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*).
        // However, in this case, we need to project all of the data columns that contribute to the index.
        IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
        for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
            if (index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                scan.addFamily(columnRef.getFamily());
            } else {
                scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
            }
        }
        // Go through the MutationPlan abstraction so that we can build local indexes
        // with a connectionless connection (which makes testing easier).
        return new BaseMutationPlan(plan.getContext(), Operation.UPSERT) {

            @Override
            public MutationState execute() throws SQLException {
                connection.getMutationState().commitDDLFence(dataTable);
                Tuple tuple = plan.iterator().next();
                long rowCount = 0;
                if (tuple != null) {
                    Cell kv = tuple.getValue(0);
                    ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
                    // A single Cell will be returned with the count(*) - we decode that here
                    rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
                }
                // The returned MutationState reports the number of index rows that were added.
                return new MutationState(0, 0, connection, rowCount);
            }
        };
    }
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) MutationState(org.apache.phoenix.execute.MutationState) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) TableRef(org.apache.phoenix.schema.TableRef) Tuple(org.apache.phoenix.schema.tuple.Tuple) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
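
A brief sketch of how the returned plan is consumed; plan is an assumed handle (in Phoenix itself, MetaDataClient drives this during a local index build). The MutationState that execute() returns carries a row count rather than buffered mutations.

// plan: the MutationPlan returned by PostLocalIndexDDLCompiler.compile(index) above (assumed handle).
MutationState state = plan.execute();          // runs the COUNT(*) scan; the region observer
                                               // writes the local index rows as a side effect
long indexRowsBuilt = state.getUpdateCount();  // rows scanned, i.e. index rows built
System.out.println("local index rows built: " + indexRowsBuilt);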

Aggregations

MutationState (org.apache.phoenix.execute.MutationState): 33
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 12
PLong (org.apache.phoenix.schema.types.PLong): 12
PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong): 11
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 10
MutationPlan (org.apache.phoenix.compile.MutationPlan): 9
Mutation (org.apache.hadoop.hbase.client.Mutation): 8
Scan (org.apache.hadoop.hbase.client.Scan): 8
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 8
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 8
SQLException (java.sql.SQLException): 7
PTable (org.apache.phoenix.schema.PTable): 7
PostDDLCompiler (org.apache.phoenix.compile.PostDDLCompiler): 6
PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 6
PreparedStatement (java.sql.PreparedStatement): 5
ArrayList (java.util.ArrayList): 5
List (java.util.List): 5
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 5
ColumnResolver (org.apache.phoenix.compile.ColumnResolver): 5
HashMap (java.util.HashMap): 4