Example 6 with ReadOnlyTableException

Use of org.apache.phoenix.schema.ReadOnlyTableException in project phoenix by apache.

The class ConnectionQueryServicesImpl, method ensureTableCreated.

/**
     * Ensures that the HBase table backing the given Phoenix table exists, creating or modifying it as needed.
     * @param tableName name of the Phoenix table
     * @param tableType type of the Phoenix table (TABLE, VIEW, INDEX, SYSTEM, ...)
     * @param props table properties
     * @param families column families and their properties
     * @param splits split points to use when creating the table
     * @param modifyExistingMetaData if false, an existing table's metadata is returned unmodified
     * @param isNamespaceMapped whether the table is namespace mapped
     * @return the new descriptor if an existing table's metadata was modified, the existing descriptor
     *         if modifyExistingMetaData is false, or null if the table was newly created or nothing changed
     * @throws SQLException
     */
private HTableDescriptor ensureTableCreated(byte[] tableName, PTableType tableType, Map<String, Object> props, List<Pair<byte[], Map<String, Object>>> families, byte[][] splits, boolean modifyExistingMetaData, boolean isNamespaceMapped) throws SQLException {
    SQLException sqlE = null;
    HTableDescriptor existingDesc = null;
    boolean isMetaTable = SchemaUtil.isMetaTable(tableName);
    byte[] physicalTable = SchemaUtil.getPhysicalHBaseTableName(tableName, isNamespaceMapped, tableType).getBytes();
    boolean tableExist = true;
    try (HBaseAdmin admin = getAdmin()) {
        final String quorum = ZKConfig.getZKQuorumServersString(config);
        final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
        logger.debug("Found quorum: " + quorum + ":" + znode);
        try {
            existingDesc = admin.getTableDescriptor(physicalTable);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            tableExist = false;
            if (tableType == PTableType.VIEW) {
                String fullTableName = Bytes.toString(tableName);
                throw new ReadOnlyTableException("An HBase table for a VIEW must already exist", SchemaUtil.getSchemaNameFromFullName(fullTableName), SchemaUtil.getTableNameFromFullName(fullTableName));
            }
        }
        HTableDescriptor newDesc = generateTableDescriptor(tableName, existingDesc, tableType, props, families, splits, isNamespaceMapped);
        if (!tableExist) {
            if (newDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(newDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
                newDesc.setValue(HTableDescriptor.SPLIT_POLICY, IndexRegionSplitPolicy.class.getName());
            }
            // Remove the splitPolicy attribute to prevent HBASE-12570
            if (isMetaTable) {
                newDesc.remove(HTableDescriptor.SPLIT_POLICY);
            }
            try {
                if (splits == null) {
                    admin.createTable(newDesc);
                } else {
                    admin.createTable(newDesc, splits);
                }
            } catch (TableExistsException e) {
                // Ignore this: another client beat us to creating the HBase metadata.
                return null;
            }
            if (isMetaTable) {
                checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
                /*
                 * Now we modify the table to add the split policy, since we know that the client and
                 * server are compatible. This works around HBASE-12570, which causes the cluster to be
                 * brought down.
                 */
                newDesc.setValue(HTableDescriptor.SPLIT_POLICY, MetaDataSplitPolicy.class.getName());
                modifyTable(physicalTable, newDesc, true);
            }
            return null;
        } else {
            if (isMetaTable) {
                checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
            } else {
                for (Pair<byte[], Map<String, Object>> family : families) {
                    if ((newDesc.getValue(HTableDescriptor.SPLIT_POLICY) == null || !newDesc.getValue(HTableDescriptor.SPLIT_POLICY).equals(IndexRegionSplitPolicy.class.getName())) && Bytes.toString(family.getFirst()).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
                        newDesc.setValue(HTableDescriptor.SPLIT_POLICY, IndexRegionSplitPolicy.class.getName());
                        break;
                    }
                }
            }
            if (!modifyExistingMetaData) {
                // Caller already knows that no metadata was changed
                return existingDesc;
            }
            boolean willBeTx = Boolean.TRUE.equals(props.get(TableProperty.TRANSACTIONAL.name()));
            // If the table will be transactional, set the property so that any existing
            // non-transactional data is correctly read.
            if (willBeTx) {
                newDesc.setValue(TxConstants.READ_NON_TX_DATA, Boolean.TRUE.toString());
            } else {
                // If the table is already transactional, don't allow switching it back to non-transactional.
                if (existingDesc.hasCoprocessor(PhoenixTransactionalProcessor.class.getName())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX).setSchemaName(SchemaUtil.getSchemaNameFromFullName(tableName)).setTableName(SchemaUtil.getTableNameFromFullName(tableName)).build().buildException();
                }
                newDesc.remove(TxConstants.READ_NON_TX_DATA);
            }
            if (existingDesc.equals(newDesc)) {
                // Indicate that no metadata was changed
                return null;
            }
            modifyTable(physicalTable, newDesc, true);
            return newDesc;
        }
    } catch (IOException e) {
        sqlE = ServerUtil.parseServerException(e);
    } catch (InterruptedException e) {
        // restore the interrupt status
        Thread.currentThread().interrupt();
        sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
    } catch (TimeoutException e) {
        sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT).setRootCause(e.getCause() != null ? e.getCause() : e).build().buildException();
    } finally {
        if (sqlE != null) {
            throw sqlE;
        }
    }
    // will never make it here
    return null;
}
Also used : SQLException(java.sql.SQLException) PhoenixTransactionalProcessor(org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) IndexRegionSplitPolicy(org.apache.phoenix.hbase.index.IndexRegionSplitPolicy) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) MetaDataSplitPolicy(org.apache.phoenix.schema.MetaDataSplitPolicy) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) TableExistsException(org.apache.hadoop.hbase.TableExistsException) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) TimeoutException(java.util.concurrent.TimeoutException)
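
The ReadOnlyTableException thrown above is what a client sees when it tries to map a Phoenix VIEW onto an HBase table that does not exist. Below is a minimal, hypothetical sketch of that client-side failure mode; the JDBC URL and table name are placeholders, it assumes the Phoenix driver is on the classpath, and depending on how the name resolves Phoenix may instead report a TableNotFoundException, as the QueryDatabaseMetaDataIT example further down shows.

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.schema.ReadOnlyTableException;
import org.apache.phoenix.schema.TableNotFoundException;

public class MappedViewMissingTableSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical JDBC URL; adjust to your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // No HBase table named NO_SUCH_TABLE exists, so the CREATE VIEW cannot be mapped onto it.
            conn.createStatement().execute(
                    "CREATE VIEW NO_SUCH_TABLE (id CHAR(1) NOT NULL PRIMARY KEY, a.col1 INTEGER)");
        } catch (ReadOnlyTableException | TableNotFoundException e) {
            // ensureTableCreated refuses to create an HBase table on behalf of a VIEW.
            System.out.println("Cannot map view onto a missing HBase table: " + e.getMessage());
        }
    }
}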

Example 7 with ReadOnlyTableException

Use of org.apache.phoenix.schema.ReadOnlyTableException in project phoenix by apache.

The class ConnectionQueryServicesImpl, method generateTableDescriptor.

private HTableDescriptor generateTableDescriptor(byte[] tableName, HTableDescriptor existingDesc, PTableType tableType, Map<String, Object> tableProps, List<Pair<byte[], Map<String, Object>>> families, byte[][] splits, boolean isNamespaceMapped) throws SQLException {
    String defaultFamilyName = (String) tableProps.remove(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME);
    HTableDescriptor tableDescriptor = (existingDesc != null) ? new HTableDescriptor(existingDesc) : new HTableDescriptor(SchemaUtil.getPhysicalHBaseTableName(tableName, isNamespaceMapped, tableType).getBytes());
    // By default, do not automatically rebuild/catch up an index on a write failure
    for (Entry<String, Object> entry : tableProps.entrySet()) {
        String key = entry.getKey();
        if (!TableProperty.isPhoenixTableProperty(key)) {
            Object value = entry.getValue();
            tableDescriptor.setValue(key, value == null ? null : value.toString());
        }
    }
    if (families.isEmpty()) {
        if (tableType != PTableType.VIEW) {
            byte[] defaultFamilyByes = defaultFamilyName == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : Bytes.toBytes(defaultFamilyName);
            // Add a dummy column family so we have key values for tables that have no declared column families
            HColumnDescriptor columnDescriptor = generateColumnFamilyDescriptor(new Pair<byte[], Map<String, Object>>(defaultFamilyByes, Collections.<String, Object>emptyMap()), tableType);
            tableDescriptor.addFamily(columnDescriptor);
        }
    } else {
        for (Pair<byte[], Map<String, Object>> family : families) {
            // If the family exists only in the Phoenix metadata, add it; otherwise, modify its properties accordingly.
            byte[] familyByte = family.getFirst();
            if (tableDescriptor.getFamily(familyByte) == null) {
                if (tableType == PTableType.VIEW) {
                    String fullTableName = Bytes.toString(tableName);
                    throw new ReadOnlyTableException("The HBase column families for a read-only table must already exist", SchemaUtil.getSchemaNameFromFullName(fullTableName), SchemaUtil.getTableNameFromFullName(fullTableName), Bytes.toString(familyByte));
                }
                HColumnDescriptor columnDescriptor = generateColumnFamilyDescriptor(family, tableType);
                tableDescriptor.addFamily(columnDescriptor);
            } else {
                if (tableType != PTableType.VIEW) {
                    HColumnDescriptor columnDescriptor = tableDescriptor.getFamily(familyByte);
                    if (columnDescriptor == null) {
                        throw new IllegalArgumentException("Unable to find column descriptor with family name " + Bytes.toString(family.getFirst()));
                    }
                    modifyColumnFamilyDescriptor(columnDescriptor, family.getSecond());
                }
            }
        }
    }
    addCoprocessors(tableName, tableDescriptor, tableType, tableProps);
    // PHOENIX-3072: Set index priority if this is a system table or index table
    if (tableType == PTableType.SYSTEM) {
        tableDescriptor.setValue(QueryConstants.PRIORITY, String.valueOf(PhoenixRpcSchedulerFactory.getMetadataPriority(config)));
    } else if (tableType == PTableType.INDEX // Global, mutable index
            && !isLocalIndexTable(tableDescriptor.getFamiliesKeys())
            && !Boolean.TRUE.equals(tableProps.get(PhoenixDatabaseMetaData.IMMUTABLE_ROWS))) {
        tableDescriptor.setValue(QueryConstants.PRIORITY, String.valueOf(PhoenixRpcSchedulerFactory.getIndexPriority(config)));
    }
    return tableDescriptor;
}
Also used : ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 8 with ReadOnlyTableException

Use of org.apache.phoenix.schema.ReadOnlyTableException in project phoenix by apache.

The class DeleteCompiler, method compile.

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasLimit = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    boolean retryOnce = !isAutoCommit;
    TableRef tableRefToBe;
    boolean noQueryReqd = false;
    boolean runOnServer = false;
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
    DeletingParallelIteratorFactory parallelIteratorFactory;
    QueryPlan dataPlanToBe = null;
    while (true) {
        try {
            resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
            tableRefToBe = resolverToBe.getTables().get(0);
            PTable table = tableRefToBe.getTable();
            // TODO: SchemaUtil.isReadOnly(PTable, connection)?
            if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
                throw new ReadOnlyTableException(schemaName, tableName);
            } else if (table.isTransactional() && connection.getSCN() != null) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
            noQueryReqd = !hasLimit;
            // Can't run on the same server for transactional data, as we need the row keys for the data
            // that is being deleted for conflict detection purposes.
            runOnServer = isAutoCommit && noQueryReqd && !table.isTransactional();
            HintNode hint = delete.getHint();
            if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
                hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
            }
            List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
            boolean isSalted = table.getBucketNum() != null;
            boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
            boolean isSharedViewIndex = table.getViewIndexId() != null;
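            // Project only the PK columns, skipping the leading salt byte, tenant ID, and view index ID columns when present.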
            for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); i < table.getPKColumns().size(); i++) {
                PColumn column = table.getPKColumns().get(i);
                aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
            }
            select = FACTORY.select(delete.getTable(), hint, false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
            select = StatementNormalizer.normalize(select, resolverToBe);
            SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
            if (transformedSelect != select) {
                resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
                select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
            }
            parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection);
            QueryOptimizer optimizer = new QueryOptimizer(services);
            QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory, new SequenceManager(statement));
            dataPlanToBe = compiler.compile();
            queryPlans = Lists.newArrayList(mayHaveImmutableIndexes ? optimizer.getApplicablePlans(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory) : optimizer.getBestPlan(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory));
            if (mayHaveImmutableIndexes) {
                // FIXME: this is ugly
                // Lookup the table being deleted from in the cache, as it's possible that the
                // optimizer updated the cache if it found indexes that were out of date.
                // If the index was marked as disabled, it should not be in the list
                // of immutable indexes.
                table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
                tableRefToBe.setTable(table);
                immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            }
        } catch (MetaDataEntityNotFoundException e) {
            // Our metadata may be out of date, so update the cache once and retry.
            // Otherwise throw, as we'll just get the same error next time.
            if (retryOnce) {
                retryOnce = false;
                MetaDataMutationResult result = new MetaDataClient(connection).updateCache(schemaName, tableName);
                if (result.wasUpdated()) {
                    continue;
                }
            }
            throw e;
        }
        break;
    }
    boolean isBuildingImmutable = false;
    final boolean hasImmutableIndexes = !immutableIndex.isEmpty();
    if (hasImmutableIndexes) {
        for (PTable index : immutableIndex.values()) {
            if (index.getIndexState() == PIndexState.BUILDING) {
                isBuildingImmutable = true;
                break;
            }
        }
    }
    final QueryPlan dataPlan = dataPlanToBe;
    // tableRefs is parallel with queryPlans
    TableRef[] tableRefs = new TableRef[hasImmutableIndexes ? immutableIndex.size() : 1];
    if (hasImmutableIndexes) {
        int i = 0;
        Iterator<QueryPlan> plans = queryPlans.iterator();
        while (plans.hasNext()) {
            QueryPlan plan = plans.next();
            PTable table = plan.getTableRef().getTable();
            if (table.getType() == PTableType.INDEX) {
                // index plans
                tableRefs[i++] = plan.getTableRef();
                immutableIndex.remove(table.getKey());
            } else if (!isBuildingImmutable) {
                // data plan
                /*
                 * If we have immutable indexes that we need to maintain, don't execute the data plan
                 * as we can save a query by piggy-backing on any of the other index queries, since the
                 * PK columns that we need are always in each index row.
                 */
                plans.remove();
            }
        }
        /*
         * If we have any immutable indexes remaining, then that means that the plan for that index got filtered out
         * because it could not be executed. This would occur if a column in the where clause is not found in the
         * immutable index.
         */
        if (!immutableIndex.isEmpty()) {
            Collection<PTable> immutableIndexes = immutableIndex.values();
            if (!isBuildingImmutable || hasNonPKIndexedColumns(immutableIndexes)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS).setSchemaName(tableRefToBe.getTable().getSchemaName().getString()).setTableName(tableRefToBe.getTable().getTableName().getString()).build().buildException();
            }
            runOnServer = false;
        }
    }
    List<TableRef> buildingImmutableIndexes = Lists.newArrayListWithExpectedSize(immutableIndex.values().size());
    for (PTable index : immutableIndex.values()) {
        buildingImmutableIndexes.add(new TableRef(index, dataPlan.getTableRef().getTimeStamp(), dataPlan.getTableRef().getLowerBoundTimeStamp()));
    }
    // Make sure the first plan is targeting deletion from the data table
    // In the case of an immutable index, we'll also delete from the index.
    final TableRef dataTableRef = tableRefs[0] = tableRefToBe;
    /*
     * Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
     * from the data table, while the others will be for deleting rows from immutable indexes.
     */
    List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(tableRefs.length);
    for (int i = 0; i < tableRefs.length; i++) {
        final TableRef tableRef = tableRefs[i];
        final QueryPlan plan = queryPlans.get(i);
        if (!plan.getTableRef().equals(tableRef) || !(plan instanceof BaseQueryPlan)) {
            runOnServer = false;
            // FIXME: why set this to false in this case?
            noQueryReqd = false;
        }
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
        final StatementContext context = plan.getContext();
        // Checking the parse node for the absence of a WHERE clause is not sufficient, as the where clause
        // may have been optimized out. Instead, we check that there's at most a single SkipScanFilter
        // and that the scan ranges form a point lookup.
        if (noQueryReqd && (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup()) {
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    // We have a point lookup, so we know we have a simple set of fully qualified
                    // keys for our ranges
                    ScanRanges ranges = context.getScanRanges();
                    Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
                    Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
                    while (iterator.hasNext()) {
                        mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
                    }
                    return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    // Don't include the target
                    return Collections.emptySet();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return 0L;
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return 0L;
                }
            });
        } else if (runOnServer) {
            // TODO: better abstraction
            Scan scan = context.getScan();
            // Propagate IGNORE_NEWER_MUTATIONS when replaying mutations since there will be
            // future dated data row mutations that will get in the way of generating the
            // correct index rows on replay.
            scan.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
            scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
            // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
            // The coprocessor will delete each row returned from the scan
            // Ignore ORDER BY, since with auto commit on and no limit it makes no difference
            SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
            RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
            context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
            if (plan.getProjector().projectEveryRow()) {
                projectorToBe = new RowProjector(projectorToBe, true);
            }
            final RowProjector projector = projectorToBe;
            final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    // TODO: share this block of code with UPSERT SELECT
                    ImmutableBytesWritable ptr = context.getTempPtr();
                    PTable table = tableRef.getTable();
                    table.getIndexMaintainers(ptr, context.getConnection());
                    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
                    ServerCache cache = null;
                    try {
                        if (ptr.getLength() > 0) {
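                            // There are index maintainers, so pass the serialized index metadata and
                            // transaction state to the server via scan attributes.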
                            byte[] uuidValue = ServerCacheClient.generateId();
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
                            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            return new MutationState(maxSize, maxSizeBytes, connection) {

                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return aggPlan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return aggPlan.getEstimatedBytesToScan();
                }
            });
        } else {
            List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
            if (!buildingImmutableIndexes.isEmpty()) {
                immutableIndexRefsToBe = buildingImmutableIndexes;
            } else if (hasImmutableIndexes && !plan.getTableRef().equals(tableRef)) {
                immutableIndexRefsToBe = Collections.singletonList(plan.getTableRef());
            }
            final List<TableRef> immutableIndexRefs = immutableIndexRefsToBe;
            final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    ResultIterator iterator = plan.iterator();
                    try {
                        if (!hasLimit) {
                            Tuple tuple;
                            long totalRowCount = 0;
                            if (parallelIteratorFactory2 != null) {
                                parallelIteratorFactory2.setRowProjector(plan.getProjector());
                                parallelIteratorFactory2.setTargetTableRef(tableRef);
                                parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
                                parallelIteratorFactory2.setIndexTargetTableRefs(immutableIndexRefs);
                            }
                            while ((tuple = iterator.next()) != null) {
                                // Runs query
                                Cell kv = tuple.getValue(0);
                                totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
                            }
                            // Return the total number of rows that have been deleted. If auto commit is off,
                            // the mutations will all be in the mutation state of the current connection.
                            MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
                            // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
                            state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
                            return state;
                        } else {
                            return deleteRows(plan.getContext(), tableRef, immutableIndexRefs, iterator, plan.getProjector(), plan.getTableRef());
                        }
                    } finally {
                        iterator.close();
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = plan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return plan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return plan.getEstimatedBytesToScan();
                }
            });
        }
    }
    return mutationPlans.size() == 1 ? mutationPlans.get(0) : new MultiDeleteMutationPlan(mutationPlans);
}
Also used : PTable(org.apache.phoenix.schema.PTable) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Cell(org.apache.hadoop.hbase.Cell) MetaDataClient(org.apache.phoenix.schema.MetaDataClient) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) AliasedNode(org.apache.phoenix.parse.AliasedNode) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) PLong(org.apache.phoenix.schema.types.PLong) Scan(org.apache.hadoop.hbase.client.Scan) Map(java.util.Map) HashMap(java.util.HashMap) ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Set(java.util.Set) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) MetaDataEntityNotFoundException(org.apache.phoenix.schema.MetaDataEntityNotFoundException) ResultIterator(org.apache.phoenix.iterate.ResultIterator) Iterator(java.util.Iterator) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) Tuple(org.apache.phoenix.schema.tuple.Tuple) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
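
The read-only check at the top of compile means a DELETE against a read-only view fails before any query runs. A minimal sketch of that behavior follows, using hypothetical table and view names and a placeholder JDBC URL; following the ViewIT test below, a non-equality filter in the view definition is assumed to make the view read-only.

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.schema.ReadOnlyTableException;

public class DeleteFromReadOnlyViewSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute(
                    "CREATE TABLE T1 (k INTEGER NOT NULL PRIMARY KEY, v DATE)");
            // The range filter makes this view read-only (see the ViewIT test below).
            conn.createStatement().execute(
                    "CREATE VIEW RO_VIEW AS SELECT * FROM T1 WHERE v > DATE '2010-01-01'");
            try {
                conn.createStatement().execute("DELETE FROM RO_VIEW");
            } catch (ReadOnlyTableException e) {
                // DeleteCompiler.compile rejects deletes against read-only views.
                System.out.println("Expected failure: " + e.getMessage());
            }
        }
    }
}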

Example 9 with ReadOnlyTableException

Use of org.apache.phoenix.schema.ReadOnlyTableException in project phoenix by apache.

The class ViewIT, method testViewWithCurrentDate.

@Test
public void testViewWithCurrentDate() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 DATE)" + tableDDLOptions;
    conn.createStatement().execute(ddl);
    String viewName = "V_" + generateUniqueName();
    ddl = "CREATE VIEW " + viewName + " (v VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE v2 > CURRENT_DATE()-5 AND v2 > DATE '2010-01-01'";
    conn.createStatement().execute(ddl);
    try {
        conn.createStatement().execute("UPSERT INTO " + viewName + " VALUES(1)");
        fail();
    } catch (ReadOnlyTableException e) {
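        // Expected: the view is read-only, so the UPSERT must fail.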
    }
    for (int i = 0; i < 10; i++) {
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + i + ", " + (i + 10) + ",CURRENT_DATE()-" + i + ")");
    }
    conn.commit();
    int count = 0;
    ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM " + viewName);
    while (rs.next()) {
        assertEquals(count, rs.getInt(1));
        count++;
    }
    assertEquals(5, count);
}
Also used : ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultSet(java.sql.ResultSet) Test(org.junit.Test)

Example 10 with ReadOnlyTableException

Use of org.apache.phoenix.schema.ReadOnlyTableException in project phoenix by apache.

The class QueryDatabaseMetaDataIT, method testCreateViewOnExistingTable.

@SuppressWarnings("deprecation")
@Test
public void testCreateViewOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] { cfB, cfC };
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    try {
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
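        // Ignore: the table does not exist yet; it is created below.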
    }
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);
    admin.close();
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    Connection conn1 = DriverManager.getConnection(getUrl(), props);
    String createStmt = "create view bogusTable" + "   (id char(1) not null primary key,\n" + "    a.col1 integer,\n" + "    d.col2 bigint)\n";
    try {
        conn1.createStatement().execute(createStmt);
        fail();
    } catch (TableNotFoundException e) {
    // expected to fail b/c table doesn't exist
    } catch (ReadOnlyTableException e) {
    // expected to fail b/c table doesn't exist
    }
    createStmt = "create view " + MDTEST_NAME + "   (id char(1) not null primary key,\n" + "    a.col1 integer,\n" + "    b.col2 bigint)\n";
    try {
        conn1.createStatement().execute(createStmt);
        fail();
    } catch (ReadOnlyTableException e) {
    // expected to fail b/c cf a doesn't exist
    }
    createStmt = "create view " + MDTEST_NAME + "   (id char(1) not null primary key,\n" + "    b.col1 integer,\n" + "    c.col2 bigint)\n";
    try {
        conn1.createStatement().execute(createStmt);
        fail();
    } catch (ReadOnlyTableException e) {
    // expected to fail b/c cf C doesn't exist (case issue)
    }
    createStmt = "create view " + MDTEST_NAME + "   (id char(1) not null primary key,\n" + "    b.col1 integer,\n" + "    \"c\".col2 bigint) IMMUTABLE_ROWS=true \n";
    // should be ok now
    conn1.createStatement().execute(createStmt);
    conn1.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    PhoenixConnection conn2 = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    ResultSet rs = conn2.getMetaData().getTables(null, null, MDTEST_NAME, null);
    assertTrue(rs.next());
    assertEquals(ViewType.MAPPED.name(), rs.getString(PhoenixDatabaseMetaData.VIEW_TYPE));
    assertFalse(rs.next());
    String deleteStmt = "DELETE FROM " + MDTEST_NAME;
    PreparedStatement ps = conn2.prepareStatement(deleteStmt);
    try {
        ps.execute();
        fail();
    } catch (ReadOnlyTableException e) {
    // expected to fail b/c table is read-only
    }
    String upsert = "UPSERT INTO " + MDTEST_NAME + "(id,col1,col2) VALUES(?,?,?)";
    ps = conn2.prepareStatement(upsert);
    try {
        ps.setString(1, Integer.toString(0));
        ps.setInt(2, 1);
        ps.setInt(3, 2);
        ps.execute();
        fail();
    } catch (ReadOnlyTableException e) {
    // expected to fail b/c table is read-only
    }
    HTableInterface htable = conn2.getQueryServices().getTable(SchemaUtil.getTableNameAsBytes(MDTEST_SCHEMA_NAME, MDTEST_NAME));
    Put put = new Put(Bytes.toBytes("0"));
    put.add(cfB, Bytes.toBytes("COL1"), ts + 6, PInteger.INSTANCE.toBytes(1));
    put.add(cfC, Bytes.toBytes("COL2"), ts + 6, PLong.INSTANCE.toBytes(2));
    htable.put(put);
    conn2.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
    Connection conn7 = DriverManager.getConnection(getUrl(), props);
    // Should be ok b/c we've marked the view with IMMUTABLE_ROWS=true
    conn7.createStatement().execute("CREATE INDEX idx ON " + MDTEST_NAME + "(B.COL1)");
    String select = "SELECT col1 FROM " + MDTEST_NAME + " WHERE col2=?";
    ps = conn7.prepareStatement(select);
    ps.setInt(1, 2);
    rs = ps.executeQuery();
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertFalse(rs.next());
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 12));
    Connection conn75 = DriverManager.getConnection(getUrl(), props);
    String dropTable = "DROP TABLE " + MDTEST_NAME;
    ps = conn75.prepareStatement(dropTable);
    try {
        ps.execute();
        fail();
    } catch (TableNotFoundException e) {
    // expected to fail b/c it is a view
    }
    String dropView = "DROP VIEW " + MDTEST_NAME;
    ps = conn75.prepareStatement(dropView);
    ps.execute();
    conn75.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 15));
    Connection conn8 = DriverManager.getConnection(getUrl(), props);
    createStmt = "create view " + MDTEST_NAME + "   (id char(1) not null primary key,\n" + "    b.col1 integer,\n" + "    \"c\".col2 bigint) IMMUTABLE_ROWS=true\n";
    // should be ok to create a view with IMMUTABLE_ROWS = true
    conn8.createStatement().execute(createStmt);
    conn8.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 20));
    Connection conn9 = DriverManager.getConnection(getUrl(), props);
    conn9.createStatement().execute("CREATE INDEX idx ON " + MDTEST_NAME + "(B.COL1)");
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 30));
    Connection conn91 = DriverManager.getConnection(getUrl(), props);
    ps = conn91.prepareStatement(dropView);
    ps.execute();
    conn91.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 35));
    Connection conn92 = DriverManager.getConnection(getUrl(), props);
    createStmt = "create view " + MDTEST_NAME + "   (id char(1) not null primary key,\n" + "    b.col1 integer,\n" + "    \"c\".col2 bigint) as\n" + " select * from " + MDTEST_NAME + " where b.col1 = 1";
    conn92.createStatement().execute(createStmt);
    conn92.close();
    put = new Put(Bytes.toBytes("1"));
    put.add(cfB, Bytes.toBytes("COL1"), ts + 39, PInteger.INSTANCE.toBytes(3));
    put.add(cfC, Bytes.toBytes("COL2"), ts + 39, PLong.INSTANCE.toBytes(4));
    htable.put(put);
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 40));
    Connection conn92a = DriverManager.getConnection(getUrl(), props);
    rs = conn92a.createStatement().executeQuery("select count(*) from " + MDTEST_NAME);
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    conn92a.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 45));
    Connection conn93 = DriverManager.getConnection(getUrl(), props);
    try {
        String alterView = "alter view " + MDTEST_NAME + " drop column b.col1";
        conn93.createStatement().execute(alterView);
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
    }
    conn93.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 50));
    Connection conn94 = DriverManager.getConnection(getUrl(), props);
    String alterView = "alter view " + MDTEST_NAME + " drop column \"c\".col2";
    conn94.createStatement().execute(alterView);
    conn94.close();
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) SQLException(java.sql.SQLException) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PreparedStatement(java.sql.PreparedStatement) Properties(java.util.Properties) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) ResultSet(java.sql.ResultSet) Test(org.junit.Test)

Aggregations

ReadOnlyTableException (org.apache.phoenix.schema.ReadOnlyTableException)10 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)8 Connection (java.sql.Connection)6 ResultSet (java.sql.ResultSet)6 Test (org.junit.Test)6 SQLException (java.sql.SQLException)3 HashMap (java.util.HashMap)3 Map (java.util.Map)3 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)3 SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo)3 ImmutableMap (com.google.common.collect.ImmutableMap)2 TreeMap (java.util.TreeMap)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2 ConcurrentMap (java.util.concurrent.ConcurrentMap)2 Cell (org.apache.hadoop.hbase.Cell)2 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)2 HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin)2 Scan (org.apache.hadoop.hbase.client.Scan)2 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)2 AggregatePlan (org.apache.phoenix.execute.AggregatePlan)2