
Example 6 with MutationCode

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.

The MetaDataClient class, dropTable method.

private MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType, boolean ifExists, boolean cascade) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete tableDelete = new Delete(key, clientTimeStamp);
        tableMetaData.add(tableDelete);
        boolean hasViewIndexTable = false;
        if (parentTableName != null) {
            byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
            Delete linkDelete = new Delete(linkKey, clientTimeStamp);
            tableMetaData.add(linkDelete);
        }
        MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
        MutationCode code = result.getMutationCode();
        PTable table = result.getTable();
        switch(code) {
            case TABLE_NOT_FOUND:
                if (!ifExists) {
                    throw new TableNotFoundException(schemaName, tableName);
                }
                break;
            case NEWER_TABLE_FOUND:
                throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            default:
                connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
                if (table != null) {
                    boolean dropMetaData = false;
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
                    connection.setAutoCommit(true);
                    if (tableType == PTableType.VIEW) {
                        for (PTable index : table.getIndexes()) {
                            tableRefs.add(new TableRef(null, index, ts, false));
                        }
                    } else {
                        dropMetaData = result.getTable().getViewIndexId() == null && connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                        // All multi-tenant tables have a view index table, so no need to check in that case
                        if (parentTableName == null) {
                            // keep this true so stats get deleted whether or not a view index is present
                            hasViewIndexTable = true;
                            MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped());
                            byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                            if (!dropMetaData) {
                                // only drop rows if a view index table actually exists
                                try (HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
                                    hasViewIndexTable = admin.tableExists(viewIndexPhysicalName);
                                } catch (IOException e1) {
                                    // swallow the exception; this existence check is not critical
                                }
                            }
                        }
                        if (tableType == PTableType.TABLE && (table.isMultiTenant() || hasViewIndexTable)) {
                            if (hasViewIndexTable) {
                                byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                                PTable viewIndexTable = new PTableImpl(null, SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName), SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName), ts, table.getColumnFamilies(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.useStatsForParallelization());
                                tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
                            }
                        }
                        tableRefs.add(new TableRef(null, table, ts, false));
                        // TODO: Let the standard mutable secondary index maintenance handle this?
                        for (PTable index : table.getIndexes()) {
                            tableRefs.add(new TableRef(null, index, ts, false));
                        }
                        deleteFromStatsTable(tableRefs, ts);
                    }
                    if (!dropMetaData) {
                        MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
                        // Delete all rows of the dropped table(s); queries at earlier timestamps will still see the data
                        return connection.getQueryServices().updateData(plan);
                    }
                }
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) IOException(java.io.IOException) MutationPlan(org.apache.phoenix.compile.MutationPlan) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
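
For context, the dropTable path above is normally reached through an ordinary JDBC DROP TABLE statement against a Phoenix connection. Below is a minimal sketch of such a caller; the JDBC URL, schema, and table names are placeholders, not values from the example.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropTableExample {
    public static void main(String[] args) throws SQLException {
        // Placeholder ZooKeeper quorum; replace with your cluster's connection URL.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS corresponds to the ifExists flag handled in the TABLE_NOT_FOUND case above.
            stmt.execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_TABLE");
            // CASCADE corresponds to the cascade flag forwarded to the coprocessor call.
            stmt.execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_OTHER_TABLE CASCADE");
        }
    }
}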

Example 7 with MutationCode

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.

The MetaDataRegionObserver class, updateIndexState method.

private static void updateIndexState(PhoenixConnection conn, String indexTableName, RegionCoprocessorEnvironment env, PIndexState oldState, PIndexState newState) throws ServiceException, Throwable {
    byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
    String schemaName = SchemaUtil.getSchemaNameFromFullName(indexTableName);
    String indexName = SchemaUtil.getTableNameFromFullName(indexTableName);
    // Mimic the Put that gets generated by the client on an update of the
    // index state
    Put put = new Put(indexTableKey);
    put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes());
    if (newState == PIndexState.ACTIVE) {
        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
    }
    final List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
    MetaDataMutationResult result = conn.getQueryServices().updateIndexState(tableMetadata, null);
    MutationCode code = result.getMutationCode();
    if (code == MutationCode.TABLE_NOT_FOUND) {
        throw new TableNotFoundException(schemaName, indexName);
    }
    if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION).setMessage(" currentState=" + oldState + ". requestedState=" + newState).setSchemaName(schemaName).setTableName(indexName).build().buildException();
    }
}
Also used : TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Put(org.apache.hadoop.hbase.client.Put) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode)
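
The two checks at the end of updateIndexState show the recurring client-side pattern around MutationCode: read the code from the MetaDataMutationResult and translate failure codes into exceptions. Below is a minimal sketch of that pattern factored into a hypothetical helper (not part of the Phoenix API), using only calls that appear in the example above.

import java.sql.SQLException;

import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.schema.TableNotFoundException;

final class MutationCodeChecks {
    // Hypothetical helper: fail fast on the two failure codes handled in updateIndexState.
    static void checkIndexStateUpdate(MetaDataMutationResult result, String schemaName, String indexName) throws SQLException {
        MutationCode code = result.getMutationCode();
        if (code == MutationCode.TABLE_NOT_FOUND) {
            throw new TableNotFoundException(schemaName, indexName);
        }
        if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION).setSchemaName(schemaName).setTableName(indexName).build().buildException();
        }
    }
}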

Example 8 with MutationCode

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.

The ConnectionQueryServicesImpl class, dropTable method.

@Override
public MetaDataMutationResult dropTable(final List<Mutation> tableMetaData, final PTableType tableType, final boolean cascade) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantIdBytes, schemaBytes, tableBytes);
    final MetaDataMutationResult result = metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropTableRequest.Builder builder = DropTableRequest.newBuilder();
            for (Mutation m : tableMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setTableType(tableType.getSerializedValue());
            builder.setCascade(cascade);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropTable(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    final MutationCode code = result.getMutationCode();
    switch(code) {
        case TABLE_ALREADY_EXISTS:
            ReadOnlyProps props = this.getProps();
            boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
            PTable table = result.getTable();
            if (dropMetadata) {
                flushParentPhysicalTable(table);
                dropTables(result.getTableNamesToDelete());
            } else {
                invalidateTableStats(result.getTableNamesToDelete());
            }
            long timestamp = MetaDataUtil.getClientTimeStamp(tableMetaData);
            if (tableType == PTableType.TABLE) {
                byte[] physicalName = table.getPhysicalName().getBytes();
                ensureViewIndexTableDropped(physicalName, timestamp);
                ensureLocalIndexTableDropped(physicalName, timestamp);
                tableStatsCache.invalidateAll(table);
            }
            break;
        default:
            break;
    }
    return result;
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) PTable(org.apache.phoenix.schema.PTable) ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
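
The first lines of this dropTable override decode the SYSTEM.CATALOG row key of the first metadata mutation into its tenant, schema, and table components. Below is a minimal standalone sketch of that decoding; the literal key is a made-up illustration of the null-byte-separated layout, not data taken from the example.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.util.SchemaUtil;

public class RowKeyDecodeExample {
    public static void main(String[] args) {
        // Catalog row keys are null-byte-separated VARCHAR components: tenant id, schema name, table name.
        byte[] rowKey = Bytes.toBytes("\0MY_SCHEMA\0MY_TABLE");
        byte[][] rowKeyMetadata = new byte[3][];
        SchemaUtil.getVarChars(rowKey, rowKeyMetadata);
        System.out.println("tenant = " + Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]));
        System.out.println("schema = " + Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]));
        System.out.println("table  = " + Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]));
    }
}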

Example 9 with MutationCode

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.

The ConnectionQueryServicesImpl class, dropSchema method.

@Override
public MetaDataMutationResult dropSchema(final List<Mutation> schemaMetaData, final String schemaName) throws SQLException {
    final MetaDataMutationResult result = metaDataCoprocessorExec(SchemaUtil.getSchemaKey(schemaName), new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropSchemaRequest.Builder builder = DropSchemaRequest.newBuilder();
            for (Mutation m : schemaMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addSchemaMetadataMutations(mp.toByteString());
            }
            builder.setSchemaName(schemaName);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropSchema(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    final MutationCode code = result.getMutationCode();
    switch(code) {
        case SCHEMA_ALREADY_EXISTS:
            ReadOnlyProps props = this.getProps();
            boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
            if (dropMetadata) {
                ensureNamespaceDropped(schemaName, result.getMutationTime());
            }
            break;
        default:
            break;
    }
    return result;
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
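
On the DDL side, dropSchema is driven by a DROP SCHEMA statement, which Phoenix only accepts when namespace mapping is enabled. Below is a minimal sketch under that assumption; the JDBC URL and schema name are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropSchemaExample {
    public static void main(String[] args) throws SQLException {
        // Assumes phoenix.schema.isNamespaceMappingEnabled=true on both client and server.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // A schema can only be dropped once it no longer contains tables; the
            // SCHEMA_ALREADY_EXISTS branch above is the path taken when the drop succeeds.
            stmt.execute("DROP SCHEMA IF EXISTS MY_SCHEMA");
        }
    }
}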

Example 10 with MutationCode

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.

The ConnectionQueryServicesImpl class, dropColumn method.

@Override
public MetaDataMutationResult dropColumn(final List<Mutation> tableMetaData, PTableType tableType) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    MetaDataMutationResult result = metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropColumnRequest.Builder builder = DropColumnRequest.newBuilder();
            for (Mutation m : tableMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropColumn(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    final MutationCode code = result.getMutationCode();
    switch(code) {
        case TABLE_ALREADY_EXISTS:
            final ReadOnlyProps props = this.getProps();
            final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
            if (dropMetadata) {
                dropTables(result.getTableNamesToDelete());
            } else {
                invalidateTableStats(result.getTableNamesToDelete());
            }
            break;
        default:
            break;
    }
    return result;
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
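
Examples 8 and 10 end with the same decision: if the property behind DROP_METADATA_ATTRIB is enabled, the underlying HBase tables are physically dropped via dropTables(...); otherwise they are kept and only their cached statistics are invalidated via invalidateTableStats(...). Below is a minimal sketch of reading that flag on its own, assuming DROP_METADATA_ATTRIB and DEFAULT_DROP_METADATA are the QueryServices/QueryServicesOptions constants statically imported by the class above.

import org.apache.phoenix.util.ReadOnlyProps;

import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;

public class DropMetadataFlagExample {
    public static void main(String[] args) {
        // Empty props, so the default applies; the real client reads this from
        // ConnectionQueryServices.getProps(), as both examples above do.
        ReadOnlyProps props = ReadOnlyProps.EMPTY_PROPS;
        boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
        // true  -> physically drop the underlying HBase tables (dropTables)
        // false -> keep them, only invalidate cached table statistics (invalidateTableStats)
        System.out.println("drop physical HBase tables on DROP? " + dropMetadata);
    }
}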

Aggregations

MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 16
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 15
Mutation (org.apache.hadoop.hbase.client.Mutation): 13
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 8
MutationState (org.apache.phoenix.execute.MutationState): 8
PLong (org.apache.phoenix.schema.types.PLong): 8
PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong): 8
PreparedStatement (java.sql.PreparedStatement): 5
ColumnDefInPkConstraint (org.apache.phoenix.parse.ColumnDefInPkConstraint): 5
IndexKeyConstraint (org.apache.phoenix.parse.IndexKeyConstraint): 5
PrimaryKeyConstraint (org.apache.phoenix.parse.PrimaryKeyConstraint): 5
IOException (java.io.IOException): 4
ArrayList (java.util.ArrayList): 4
Pair (org.apache.hadoop.hbase.util.Pair): 4
MutationPlan (org.apache.phoenix.compile.MutationPlan): 4
PostDDLCompiler (org.apache.phoenix.compile.PostDDLCompiler): 4
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 3
HashMap (java.util.HashMap): 3
LinkedHashMap (java.util.LinkedHashMap): 3
Delete (org.apache.hadoop.hbase.client.Delete): 3