
Example 26 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class PhoenixIndexFailurePolicy method handleFailureWithExceptions.

private long handleFailureWithExceptions(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws Throwable {
    Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
    Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
    // start by looking at all the tables to which we attempted to write
    long timestamp = 0;
    boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
    for (HTableInterfaceReference ref : refs) {
        long minTimeStamp = 0;
        // get the minimum timestamp across all the mutations we attempted on that table
        // FIXME: all cell timestamps should be the same
        Collection<Mutation> mutations = attempted.get(ref);
        if (mutations != null) {
            for (Mutation m : mutations) {
                for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                    for (Cell kv : kvs) {
                        if (minTimeStamp == 0 || (kv.getTimestamp() >= 0 && minTimeStamp > kv.getTimestamp())) {
                            minTimeStamp = kv.getTimestamp();
                        }
                    }
                }
            }
        }
        timestamp = minTimeStamp;
        // If the data table has local index column families then get local indexes to disable.
        if (ref.getTableName().equals(env.getRegion().getTableDesc().getNameAsString()) && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDesc())) {
            for (String tableName : getLocalIndexNames(ref, mutations)) {
                indexTableNames.put(tableName, minTimeStamp);
            }
        } else {
            indexTableNames.put(ref.getTableName(), minTimeStamp);
        }
    }
    // Nothing to do if we're not disabling the index and not rebuilding on failure
    if (!disableIndexOnFailure && !rebuildIndexOnFailure) {
        return timestamp;
    }
    PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.ACTIVE;
    // For all the index tables that we've found, try to disable them; if that fails, either throw or fall back to the default failure policy, depending on configuration.
    for (Map.Entry<String, Long> tableTimeElement : indexTableNames.entrySet()) {
        String indexTableName = tableTimeElement.getKey();
        long minTimeStamp = tableTimeElement.getValue();
        // If we're not disabling the index and not blocking data table writes, negate the time stamp to differentiate this case (index stays active) from a real disable.
        if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
            minTimeStamp *= -1;
        }
        // Disable the index by using the updateIndexState method of the MetaDataProtocol endpoint coprocessor.
        HTableInterface systemTable = env.getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
        MetaDataMutationResult result = IndexUtil.setIndexDisableTimeStamp(indexTableName, minTimeStamp, systemTable, newState);
        if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
            continue;
        }
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
            if (leaveIndexActive) {
                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = " + result.getMutationCode());
                // will lead to the RS being shutdown.
                if (blockDataTableWritesOnFailure) {
                    throw new DoNotRetryIOException("Attempt to update INDEX_DISABLE_TIMESTAMP failed.");
                }
            } else {
                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = " + result.getMutationCode() + ". Will use default failure policy instead.");
                throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
            }
        }
        if (leaveIndexActive)
            LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName + " due to an exception while writing updates.", cause);
        else
            LOG.info("Successfully disabled index " + indexTableName + " due to an exception while writing updates.", cause);
    }
    // Return the cell time stamp (note they should all be the same)
    return timestamp;
}
Also used : HashMap(java.util.HashMap) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) PIndexState(org.apache.phoenix.schema.PIndexState) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) Map(java.util.Map) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
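
A minimal standalone sketch of the minimum-timestamp fold used above, assuming only plain HBase client types; findMinTimestamp is a hypothetical helper name, not part of Phoenix.

import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Mutation;

public class MinTimestampSketch {

    // Returns the smallest non-negative cell timestamp across all mutations,
    // or 0 when no cells are present (mirroring the loop above).
    static long findMinTimestamp(Collection<Mutation> mutations) {
        long min = 0;
        for (Mutation m : mutations) {
            for (List<Cell> cells : m.getFamilyCellMap().values()) {
                for (Cell cell : cells) {
                    long ts = cell.getTimestamp();
                    if (min == 0 || (ts >= 0 && ts < min)) {
                        min = ts;
                    }
                }
            }
        }
        return min;
    }
}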

Example 27 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class PhoenixUtil method isDisabledWal.

public static boolean isDisabledWal(MetaDataClient metaDataClient, String tableName) throws SQLException {
    String[] schemaInfo = getTableSchema(tableName.toUpperCase());
    MetaDataMutationResult result = metaDataClient.updateCache(schemaInfo[0], schemaInfo[1]);
    PTable dataTable = result.getTable();
    return dataTable.isWALDisabled();
}
Also used : MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) PTable(org.apache.phoenix.schema.PTable)
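
The method is a thin wrapper: updateCache refreshes the client's metadata from SYSTEM.CATALOG and returns a MetaDataMutationResult whose PTable carries the flag. A hedged end-to-end sketch of the same path, assuming a reachable cluster; the JDBC URL, schema, and table names are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.MetaDataClient;
import org.apache.phoenix.schema.PTable;

public class WalCheckSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder JDBC URL; point this at a real cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            MetaDataClient client = new MetaDataClient(pconn);
            // Same pattern as isDisabledWal: refresh the cache, then read the flag.
            MetaDataMutationResult result = client.updateCache("MY_SCHEMA", "MY_TABLE");
            PTable table = result.getTable();
            // getTable() can be null when the table isn't found; guard before use.
            System.out.println("WAL disabled: " + (table != null && table.isWALDisabled()));
        }
    }
}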

Example 28 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class PhoenixRuntime method getTableNoCache.

public static PTable getTableNoCache(Connection conn, String name) throws SQLException {
    String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
    String tableName = SchemaUtil.getTableNameFromFullName(name);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    MetaDataMutationResult result = new MetaDataClient(pconn).updateCache(pconn.getTenantId(), schemaName, tableName, true);
    if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
        throw new TableNotFoundException(schemaName, tableName);
    }
    return result.getTable();
}
Also used : MetaDataClient(org.apache.phoenix.schema.MetaDataClient) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
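
A short hedged usage sketch; the JDBC URL and table name are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.PhoenixRuntime;

public class NoCacheLookupSketch {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Bypasses the client-side metadata cache; throws TableNotFoundException
            // when the server doesn't know the table.
            PTable table = PhoenixRuntime.getTableNoCache(conn, "MY_SCHEMA.MY_TABLE");
            System.out.println("Resolved: " + table.getName().getString());
        }
    }
}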

Example 29 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class DeleteCompiler method compile.

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasLimit = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    boolean retryOnce = !isAutoCommit;
    TableRef tableRefToBe;
    boolean noQueryReqd = false;
    boolean runOnServer = false;
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
    DeletingParallelIteratorFactory parallelIteratorFactory;
    QueryPlan dataPlanToBe = null;
    while (true) {
        try {
            resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
            tableRefToBe = resolverToBe.getTables().get(0);
            PTable table = tableRefToBe.getTable();
            // TODO: SchemaUtil.isReadOnly(PTable, connection)?
            if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
                throw new ReadOnlyTableException(schemaName, tableName);
            } else if (table.isTransactional() && connection.getSCN() != null) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
            noQueryReqd = !hasLimit;
            // Can't run on same server for transactional data, as we need the row keys for the data
            // that is being upserted for conflict detection purposes.
            runOnServer = isAutoCommit && noQueryReqd && !table.isTransactional();
            HintNode hint = delete.getHint();
            if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
                hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
            }
            List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
            boolean isSalted = table.getBucketNum() != null;
            boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
            boolean isSharedViewIndex = table.getViewIndexId() != null;
            for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); i < table.getPKColumns().size(); i++) {
                PColumn column = table.getPKColumns().get(i);
                aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
            }
            select = FACTORY.select(delete.getTable(), hint, false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
            select = StatementNormalizer.normalize(select, resolverToBe);
            SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
            if (transformedSelect != select) {
                resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
                select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
            }
            parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection);
            QueryOptimizer optimizer = new QueryOptimizer(services);
            QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory, new SequenceManager(statement));
            dataPlanToBe = compiler.compile();
            queryPlans = Lists.newArrayList(mayHaveImmutableIndexes ? optimizer.getApplicablePlans(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory) : optimizer.getBestPlan(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory));
            if (mayHaveImmutableIndexes) {
                // FIXME: this is ugly
                // Lookup the table being deleted from in the cache, as it's possible that the
                // optimizer updated the cache if it found indexes that were out of date.
                // If the index was marked as disabled, it should not be in the list
                // of immutable indexes.
                table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
                tableRefToBe.setTable(table);
                immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            }
        } catch (MetaDataEntityNotFoundException e) {
            // Retry once after refreshing the metadata cache; otherwise throw, as we'd just get the same error next time.
            if (retryOnce) {
                retryOnce = false;
                MetaDataMutationResult result = new MetaDataClient(connection).updateCache(schemaName, tableName);
                if (result.wasUpdated()) {
                    continue;
                }
            }
            throw e;
        }
        break;
    }
    boolean isBuildingImmutable = false;
    final boolean hasImmutableIndexes = !immutableIndex.isEmpty();
    if (hasImmutableIndexes) {
        for (PTable index : immutableIndex.values()) {
            if (index.getIndexState() == PIndexState.BUILDING) {
                isBuildingImmutable = true;
                break;
            }
        }
    }
    final QueryPlan dataPlan = dataPlanToBe;
    // tableRefs is parallel with queryPlans
    TableRef[] tableRefs = new TableRef[hasImmutableIndexes ? immutableIndex.size() : 1];
    if (hasImmutableIndexes) {
        int i = 0;
        Iterator<QueryPlan> plans = queryPlans.iterator();
        while (plans.hasNext()) {
            QueryPlan plan = plans.next();
            PTable table = plan.getTableRef().getTable();
            if (table.getType() == PTableType.INDEX) {
                // index plans
                tableRefs[i++] = plan.getTableRef();
                immutableIndex.remove(table.getKey());
            } else if (!isBuildingImmutable) {
                // data plan
                /*
                     * If we have immutable indexes that we need to maintain, don't execute the data plan
                     * as we can save a query by piggy-backing on any of the other index queries, since the
                     * PK columns that we need are always in each index row.
                     */
                plans.remove();
            }
        }
        /*
             * If we have any immutable indexes remaining, then that means that the plan for that index got filtered out
             * because it could not be executed. This would occur if a column in the where clause is not found in the
             * immutable index.
             */
        if (!immutableIndex.isEmpty()) {
            Collection<PTable> immutableIndexes = immutableIndex.values();
            if (!isBuildingImmutable || hasNonPKIndexedColumns(immutableIndexes)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS).setSchemaName(tableRefToBe.getTable().getSchemaName().getString()).setTableName(tableRefToBe.getTable().getTableName().getString()).build().buildException();
            }
            runOnServer = false;
        }
    }
    List<TableRef> buildingImmutableIndexes = Lists.newArrayListWithExpectedSize(immutableIndex.values().size());
    for (PTable index : immutableIndex.values()) {
        buildingImmutableIndexes.add(new TableRef(index, dataPlan.getTableRef().getTimeStamp(), dataPlan.getTableRef().getLowerBoundTimeStamp()));
    }
    // Make sure the first plan is targeting deletion from the data table
    // In the case of an immutable index, we'll also delete from the index.
    final TableRef dataTableRef = tableRefs[0] = tableRefToBe;
    /*
         * Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
         * from the data table, while the others will be for deleting rows from immutable indexes.
         */
    List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(tableRefs.length);
    for (int i = 0; i < tableRefs.length; i++) {
        final TableRef tableRef = tableRefs[i];
        final QueryPlan plan = queryPlans.get(i);
        if (!plan.getTableRef().equals(tableRef) || !(plan instanceof BaseQueryPlan)) {
            runOnServer = false;
            // FIXME: why set this to false in this case?
            noQueryReqd = false;
        }
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
        final StatementContext context = plan.getContext();
        // Checking the parse node for a missing WHERE clause isn't sufficient, as the where clause
        // may have been optimized out. Instead, we check that there's a single SkipScanFilter.
        if (noQueryReqd && (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup()) {
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    // We have a point lookup, so we know we have a simple set of fully qualified
                    // keys for our ranges
                    ScanRanges ranges = context.getScanRanges();
                    Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
                    Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
                    while (iterator.hasNext()) {
                        mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
                    }
                    return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    // Don't include the target
                    return Collections.emptySet();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return 0L;
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return 0L;
                }
            });
        } else if (runOnServer) {
            // TODO: better abstraction
            Scan scan = context.getScan();
            // Propagate IGNORE_NEWER_MUTATIONS when replaying mutations since there will be
            // future dated data row mutations that will get in the way of generating the
            // correct index rows on replay.
            scan.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
            scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
            // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
            // The coprocessor will delete each row returned from the scan
            // Ignoring ORDER BY, since with auto commit on and no limit it makes no difference
            SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
            RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
            context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
            if (plan.getProjector().projectEveryRow()) {
                projectorToBe = new RowProjector(projectorToBe, true);
            }
            final RowProjector projector = projectorToBe;
            final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    // TODO: share this block of code with UPSERT SELECT
                    ImmutableBytesWritable ptr = context.getTempPtr();
                    PTable table = tableRef.getTable();
                    table.getIndexMaintainers(ptr, context.getConnection());
                    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
                    ServerCache cache = null;
                    try {
                        if (ptr.getLength() > 0) {
                            byte[] uuidValue = ServerCacheClient.generateId();
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
                            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            return new MutationState(maxSize, maxSizeBytes, connection) {

                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return aggPlan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return aggPlan.getEstimatedBytesToScan();
                }
            });
        } else {
            List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
            if (!buildingImmutableIndexes.isEmpty()) {
                immutableIndexRefsToBe = buildingImmutableIndexes;
            } else if (hasImmutableIndexes && !plan.getTableRef().equals(tableRef)) {
                immutableIndexRefsToBe = Collections.singletonList(plan.getTableRef());
            }
            final List<TableRef> immutableIndexRefs = immutableIndexRefsToBe;
            final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    ResultIterator iterator = plan.iterator();
                    try {
                        if (!hasLimit) {
                            Tuple tuple;
                            long totalRowCount = 0;
                            if (parallelIteratorFactory2 != null) {
                                parallelIteratorFactory2.setRowProjector(plan.getProjector());
                                parallelIteratorFactory2.setTargetTableRef(tableRef);
                                parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
                                parallelIteratorFactory2.setIndexTargetTableRefs(immutableIndexRefs);
                            }
                            while ((tuple = iterator.next()) != null) {
                                // Runs query
                                Cell kv = tuple.getValue(0);
                                totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
                            }
                            // Return total number of rows that have been deleted. In the case of auto commit being off
                            // the mutations will all be in the mutation state of the current connection.
                            MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
                            // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
                            state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
                            return state;
                        } else {
                            return deleteRows(plan.getContext(), tableRef, immutableIndexRefs, iterator, plan.getProjector(), plan.getTableRef());
                        }
                    } finally {
                        iterator.close();
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = plan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return plan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return plan.getEstimatedBytesToScan();
                }
            });
        }
    }
    return mutationPlans.size() == 1 ? mutationPlans.get(0) : new MultiDeleteMutationPlan(mutationPlans);
}
Also used : PTable(org.apache.phoenix.schema.PTable) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Cell(org.apache.hadoop.hbase.Cell) MetaDataClient(org.apache.phoenix.schema.MetaDataClient) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) AliasedNode(org.apache.phoenix.parse.AliasedNode) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) PLong(org.apache.phoenix.schema.types.PLong) Scan(org.apache.hadoop.hbase.client.Scan) Map(java.util.Map) HashMap(java.util.HashMap) ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Set(java.util.Set) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) MetaDataEntityNotFoundException(org.apache.phoenix.schema.MetaDataEntityNotFoundException) Iterator(java.util.Iterator) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) Tuple(org.apache.phoenix.schema.tuple.Tuple)
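
Buried in the long method above is a reusable idiom: on MetaDataEntityNotFoundException, refresh the metadata cache via updateCache and retry exactly once, and only when the refresh actually changed something (result.wasUpdated()). A hedged standalone sketch of that idiom; withOneRetry and Compilable are hypothetical names.

import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.MetaDataClient;
import org.apache.phoenix.schema.MetaDataEntityNotFoundException;

public class RetryOnceSketch {

    interface Compilable<T> {
        T run() throws Exception;
    }

    // Runs the action; on a metadata miss, refreshes the cache once and retries,
    // but only if the refresh actually updated anything.
    static <T> T withOneRetry(PhoenixConnection connection, String schemaName,
            String tableName, Compilable<T> action) throws Exception {
        boolean retryOnce = true;
        while (true) {
            try {
                return action.run();
            } catch (MetaDataEntityNotFoundException e) {
                if (retryOnce) {
                    retryOnce = false;
                    MetaDataMutationResult result =
                            new MetaDataClient(connection).updateCache(schemaName, tableName);
                    if (result.wasUpdated()) {
                        continue; // stale cache refreshed; try the action again
                    }
                }
                throw e; // the same error would recur, so give up
            }
        }
    }
}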

Example 30 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class MutationState method shouldResubmitTransaction.

/**
     * Determines whether indexes were added to mutated tables while the transaction was in progress.
     * @return true if indexes were added and false otherwise.
     * @throws SQLException 
     */
private boolean shouldResubmitTransaction(Set<TableRef> txTableRefs) throws SQLException {
    if (logger.isInfoEnabled())
        logger.info("Checking for index updates as of " + getInitialWritePointer());
    MetaDataClient client = new MetaDataClient(connection);
    PMetaData cache = connection.getMetaDataCache();
    boolean addedAnyIndexes = false;
    boolean allImmutableTables = !txTableRefs.isEmpty();
    for (TableRef tableRef : txTableRefs) {
        PTable dataTable = tableRef.getTable();
        List<PTable> oldIndexes;
        PTableRef ptableRef = cache.getTableRef(dataTable.getKey());
        oldIndexes = ptableRef.getTable().getIndexes();
        // Always check at server for metadata change, as it's possible that the table is configured to not check for metadata changes
        // but in this case, the tx manager is telling us it's likely that there has been a change.
        MetaDataMutationResult result = client.updateCache(dataTable.getTenantId(), dataTable.getSchemaName().getString(), dataTable.getTableName().getString(), true);
        long timestamp = TransactionUtil.getResolvedTime(connection, result);
        tableRef.setTimeStamp(timestamp);
        PTable updatedDataTable = result.getTable();
        if (updatedDataTable == null) {
            throw new TableNotFoundException(dataTable.getSchemaName().getString(), dataTable.getTableName().getString());
        }
        allImmutableTables &= updatedDataTable.isImmutableRows();
        tableRef.setTable(updatedDataTable);
        if (!addedAnyIndexes) {
            // TODO: in theory we should do a deep equals check here, as it's possible
            // that an index was dropped and recreated with the same name but different
            // indexed/covered columns.
            addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes()));
            if (logger.isInfoEnabled())
                logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to " + updatedDataTable.getName().getString() + " with indexes " + updatedDataTable.getIndexes());
        }
    }
    if (logger.isInfoEnabled())
        logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer() + " over " + (allImmutableTables ? " all immutable tables" : " some mutable tables"));
    // If any indexes were added, then the conflict might be due to DDL/DML fence.
    return allImmutableTables || addedAnyIndexes;
}
Also used : MetaDataClient(org.apache.phoenix.schema.MetaDataClient) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) PMetaData(org.apache.phoenix.schema.PMetaData) PTableRef(org.apache.phoenix.schema.PTableRef) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) TableRef(org.apache.phoenix.schema.TableRef) PTable(org.apache.phoenix.schema.PTable)
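
The key call is the four-argument updateCache overload with the final flag set to true, forcing a server round trip even when the client would otherwise trust its cached metadata. A hedged sketch of that check in isolation; indexesChanged is a hypothetical helper, and (as the TODO above notes) comparing index lists with equals() can miss an index dropped and recreated under the same name.

import java.sql.SQLException;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.MetaDataClient;
import org.apache.phoenix.schema.PTable;

public class IndexChangeCheckSketch {

    // Returns true when the server-side index list differs from the cached one.
    static boolean indexesChanged(PhoenixConnection connection, PTable cached)
            throws SQLException {
        MetaDataClient client = new MetaDataClient(connection);
        // Final argument 'true' forces the server check, as in shouldResubmitTransaction.
        MetaDataMutationResult result = client.updateCache(cached.getTenantId(),
                cached.getSchemaName().getString(), cached.getTableName().getString(), true);
        PTable fresh = result.getTable();
        return fresh != null && !cached.getIndexes().equals(fresh.getIndexes());
    }
}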

Aggregations

MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 36 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 20 uses
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 15 uses
PLong (org.apache.phoenix.schema.types.PLong): 11 uses
MutationState (org.apache.phoenix.execute.MutationState): 10 uses
PTable (org.apache.phoenix.schema.PTable): 10 uses
PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong): 9 uses
IOException (java.io.IOException): 8 uses
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback): 8 uses
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController): 8 uses
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 8 uses
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 8 uses
MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService): 8 uses
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 8 uses
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder): 8 uses
TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException): 8 uses
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 7 uses
ArrayList (java.util.ArrayList): 7 uses
HashMap (java.util.HashMap): 7 uses
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch): 7 uses