Example 16 with MetaDataMutationResult

Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

From class MetaDataClient, method dropTable:

private MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType, boolean ifExists, boolean cascade) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete tableDelete = new Delete(key, clientTimeStamp);
        tableMetaData.add(tableDelete);
        boolean hasViewIndexTable = false;
        if (parentTableName != null) {
            byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
            Delete linkDelete = new Delete(linkKey, clientTimeStamp);
            tableMetaData.add(linkDelete);
        }
        MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
        MutationCode code = result.getMutationCode();
        PTable table = result.getTable();
        switch(code) {
            case TABLE_NOT_FOUND:
                if (!ifExists) {
                    throw new TableNotFoundException(schemaName, tableName);
                }
                break;
            case NEWER_TABLE_FOUND:
                throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            default:
                connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
                if (table != null) {
                    boolean dropMetaData = false;
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
                    connection.setAutoCommit(true);
                    if (tableType == PTableType.VIEW) {
                        for (PTable index : table.getIndexes()) {
                            tableRefs.add(new TableRef(null, index, ts, false));
                        }
                    } else {
                        dropMetaData = result.getTable().getViewIndexId() == null && connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                        // All multi-tenant tables have a view index table, so no need to check in that case
                        if (parentTableName == null) {
                            // keep true for deletion of stats, whether or not a view index is present
                            hasViewIndexTable = true;
                            MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped());
                            byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                            if (!dropMetaData) {
                                // we need to drop rows only when actually view index exists
                                try (HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
                                    hasViewIndexTable = admin.tableExists(viewIndexPhysicalName);
                                } catch (IOException e1) {
                                    // absorb the exception; this existence check is not critical
                                }
                            }
                        }
                        if (tableType == PTableType.TABLE && (table.isMultiTenant() || hasViewIndexTable)) {
                            if (hasViewIndexTable) {
                                byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                                PTable viewIndexTable = new PTableImpl(null, SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName), SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName), ts, table.getColumnFamilies(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.useStatsForParallelization());
                                tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
                            }
                        }
                        tableRefs.add(new TableRef(null, table, ts, false));
                        // TODO: Let the standard mutable secondary index maintenance handle this?
                        for (PTable index : table.getIndexes()) {
                            tableRefs.add(new TableRef(null, index, ts, false));
                        }
                        deleteFromStatsTable(tableRefs, ts);
                    }
                    if (!dropMetaData) {
                        MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
                        // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                        return connection.getQueryServices().updateData(plan);
                    }
                }
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) IOException(java.io.IOException) MutationPlan(org.apache.phoenix.compile.MutationPlan) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
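
For context, the dropTable() path above is normally reached through plain DDL on a Phoenix JDBC connection rather than called directly. A minimal sketch, assuming the Phoenix driver is on the classpath; the JDBC URL and MY_SCHEMA.MY_TABLE are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropTableSketch {
    public static void main(String[] args) throws SQLException {
        // "jdbc:phoenix:localhost" assumes a local HBase quorum; adjust as needed
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS corresponds to the ifExists flag above (TABLE_NOT_FOUND is swallowed);
            // CASCADE corresponds to the cascade flag and also drops dependent views
            stmt.execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_TABLE CASCADE");
        }
    }
}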

Example 17 with MetaDataMutationResult

Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

From class MetaDataRegionObserver, method updateIndexState:

private static void updateIndexState(PhoenixConnection conn, String indexTableName, RegionCoprocessorEnvironment env, PIndexState oldState, PIndexState newState) throws ServiceException, Throwable {
    byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
    String schemaName = SchemaUtil.getSchemaNameFromFullName(indexTableName);
    String indexName = SchemaUtil.getTableNameFromFullName(indexTableName);
    // Mimic the Put that gets generated by the client on an update of the
    // index state
    Put put = new Put(indexTableKey);
    put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes());
    if (newState == PIndexState.ACTIVE) {
        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
    }
    final List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
    MetaDataMutationResult result = conn.getQueryServices().updateIndexState(tableMetadata, null);
    MutationCode code = result.getMutationCode();
    if (code == MutationCode.TABLE_NOT_FOUND) {
        throw new TableNotFoundException(schemaName, indexName);
    }
    if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION).setMessage(" currentState=" + oldState + ". requestedState=" + newState).setSchemaName(schemaName).setTableName(indexName).build().buildException();
    }
}
Also used : TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Put(org.apache.hadoop.hbase.client.Put) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode)
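
updateIndexState() is private to the coprocessor, but the same PIndexState transitions can be requested from a client through ALTER INDEX DDL. A minimal sketch with hypothetical index and table names; an invalid transition surfaces as INVALID_INDEX_STATE_TRANSITION, just as in the method above:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class AlterIndexSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // REBUILD, DISABLE, and USABLE each request an index state transition
            // that ultimately lands as a Put on the INDEX_STATE column, as above
            stmt.execute("ALTER INDEX IF EXISTS MY_INDEX ON MY_SCHEMA.MY_TABLE REBUILD");
        }
    }
}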

Example 18 with MetaDataMutationResult

Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

From class MetaDataClient, method dropFunction:

private MutationState dropFunction(String functionName, boolean ifExists) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PName tenantId = connection.getTenantId();
        byte[] key = SchemaUtil.getFunctionKey(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(), Bytes.toBytes(functionName));
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        try {
            PFunction function = connection.getMetaDataCache().getFunction(new PTableKey(tenantId, functionName));
            if (function.isTemporaryFunction()) {
                connection.removeFunction(tenantId, functionName, clientTimeStamp);
                return new MutationState(0, 0, connection);
            }
        } catch (FunctionNotFoundException e) {
            // not a temporary function in the local cache; fall through to the server-side drop
        }
        List<Mutation> functionMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete functionDelete = new Delete(key, clientTimeStamp);
        functionMetaData.add(functionDelete);
        MetaDataMutationResult result = connection.getQueryServices().dropFunction(functionMetaData, ifExists);
        MutationCode code = result.getMutationCode();
        switch(code) {
            case FUNCTION_NOT_FOUND:
                if (!ifExists) {
                    throw new FunctionNotFoundException(functionName);
                }
                break;
            default:
                connection.removeFunction(tenantId, functionName, result.getMutationTime());
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PFunction(org.apache.phoenix.parse.PFunction) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
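
dropFunction() backs the DROP FUNCTION statement for user-defined functions. A minimal sketch with a hypothetical function name; note that UDF support must be enabled (phoenix.functions.allowUserDefinedFunctions=true) for function DDL to be accepted:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropFunctionSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS maps to the ifExists flag above, so FUNCTION_NOT_FOUND
            // does not raise a FunctionNotFoundException
            stmt.execute("DROP FUNCTION IF EXISTS MY_UDF");
        }
    }
}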

Example 19 with MetaDataMutationResult

Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

From class MetaDataClient, method createTable:

public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, String viewStatement, ViewType viewType, byte[][] viewColumnConstants, BitSet isViewColumnReferenced) throws SQLException {
    TableName tableName = statement.getTableName();
    Map<String, Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
    Map<String, Object> commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1);
    populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps);
    boolean isAppendOnlySchema = false;
    long updateCacheFrequency = connection.getQueryServices().getProps().getLong(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB, QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY);
    if (parent == null) {
        Boolean appendOnlySchemaProp = (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps);
        if (appendOnlySchemaProp != null) {
            isAppendOnlySchema = appendOnlySchemaProp;
        }
        Long updateCacheFrequencyProp = (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps);
        if (updateCacheFrequencyProp != null) {
            updateCacheFrequency = updateCacheFrequencyProp;
        }
    } else {
        isAppendOnlySchema = parent.isAppendOnlySchema();
        updateCacheFrequency = parent.getUpdateCacheFrequency();
    }
    // updateCacheFrequency cannot be set to ALWAYS if isAppendOnlySchema is true
    if (isAppendOnlySchema && updateCacheFrequency == 0) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPDATE_CACHE_FREQUENCY_INVALID).setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build().buildException();
    }
    Boolean immutableProp = (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps);
    if (statement.immutableRows() != null && immutableProp != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID).setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build().buildException();
    }
    PTable table = null;
    // if the APPEND_ONLY_SCHEMA attribute is true, first check if the table is present in the cache;
    // if it is, add any columns that are not already present
    if (isAppendOnlySchema) {
        // look up the table in the cache
        MetaDataMutationResult result = updateCache(tableName.getSchemaName(), tableName.getTableName());
        if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) {
            table = result.getTable();
            if (!statement.ifNotExists()) {
                throw new NewerTableAlreadyExistsException(tableName.getSchemaName(), tableName.getTableName(), table);
            }
            List<ColumnDef> columnDefs = statement.getColumnDefs();
            PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
            // get the list of columns to add
            for (ColumnDef columnDef : columnDefs) {
                if (pkConstraint.contains(columnDef.getColumnDefName())) {
                    columnDef.setIsPK(true);
                }
            }
            // if there are new columns to add
            return addColumn(table, columnDefs, statement.getProps(), statement.ifNotExists(), true, NamedTableNode.create(statement.getTableName()), statement.getTableType());
        }
    }
    table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced, false, null, null, tableProps, commonFamilyProps);
    if (table == null || table.getType() == PTableType.VIEW) /*|| table.isTransactional()*/ {
        return new MutationState(0, 0, connection);
    }
    // Hack to get around the case when an SCN is specified on the connection.
    // In this case, we won't see the table we just created yet, so we hack
    // around it by forcing the compiler to not resolve anything.
    PostDDLCompiler compiler = new PostDDLCompiler(connection);
    //connection.setAutoCommit(true);
    // Execute any necessary data updates
    Long scn = connection.getSCN();
    long ts = (scn == null ? table.getTimeStamp() : scn);
    // Getting the schema through the current connection doesn't work when the connection has an SCN specified,
    // since the table won't be added to the current connection.
    TableRef tableRef = new TableRef(null, table, ts, false);
    byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table);
    MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), emptyCF, null, null, tableRef.getTimeStamp());
    return connection.getQueryServices().updateData(plan);
}
Also used : ColumnDef(org.apache.phoenix.parse.ColumnDef) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) MutationPlan(org.apache.phoenix.compile.MutationPlan) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) TableName(org.apache.phoenix.parse.TableName) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
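
The APPEND_ONLY_SCHEMA / UPDATE_CACHE_FREQUENCY check near the top of createTable() is visible from DDL: APPEND_ONLY_SCHEMA=true combined with UPDATE_CACHE_FREQUENCY=0 (ALWAYS) is rejected, while a non-zero frequency is accepted. A minimal sketch with hypothetical schema, table, and column names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class CreateTableSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // APPEND_ONLY_SCHEMA requires a non-zero UPDATE_CACHE_FREQUENCY (in ms);
            // re-running this statement with IF NOT EXISTS takes the addColumn() path above
            stmt.execute("CREATE TABLE IF NOT EXISTS MY_SCHEMA.EVENTS ("
                    + " ID BIGINT NOT NULL PRIMARY KEY,"
                    + " NAME VARCHAR)"
                    + " APPEND_ONLY_SCHEMA=true, UPDATE_CACHE_FREQUENCY=300000");
        }
    }
}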

Example 20 with MetaDataMutationResult

Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

From class MetaDataClient, method dropSchema:

public MutationState dropSchema(DropSchemaStatement executableDropSchemaStatement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PSchema schema = new PSchema(executableDropSchemaStatement.getSchemaName());
        String schemaName = schema.getSchemaName();
        boolean ifExists = executableDropSchemaStatement.ifExists();
        byte[] key = SchemaUtil.getSchemaKey(schemaName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> schemaMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete schemaDelete = new Delete(key, clientTimeStamp);
        schemaMetaData.add(schemaDelete);
        MetaDataMutationResult result = connection.getQueryServices().dropSchema(schemaMetaData, schemaName);
        MutationCode code = result.getMutationCode();
        schema = result.getSchema();
        switch(code) {
            case SCHEMA_NOT_FOUND:
                if (!ifExists) {
                    throw new SchemaNotFoundException(schemaName);
                }
                break;
            case NEWER_SCHEMA_FOUND:
                throw new NewerSchemaAlreadyExistsException(schemaName);
            case TABLES_EXIST_ON_SCHEMA:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_SCHEMA).setSchemaName(schemaName).build().buildException();
            default:
                connection.removeSchema(schema, result.getMutationTime());
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PSchema(org.apache.phoenix.parse.PSchema) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
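
dropSchema() backs the DROP SCHEMA statement, which requires namespace mapping to be enabled (phoenix.schema.isNamespaceMappingEnabled=true) and the schema to be empty; otherwise it fails with TABLES_EXIST_ON_SCHEMA, as handled above. A minimal sketch with a hypothetical schema name:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropSchemaSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS maps to the ifExists flag above, so SCHEMA_NOT_FOUND is swallowed
            stmt.execute("DROP SCHEMA IF EXISTS MY_SCHEMA");
        }
    }
}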

Aggregations

MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 36 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 20 usages
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 15 usages
PLong (org.apache.phoenix.schema.types.PLong): 11 usages
MutationState (org.apache.phoenix.execute.MutationState): 10 usages
PTable (org.apache.phoenix.schema.PTable): 10 usages
PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong): 9 usages
IOException (java.io.IOException): 8 usages
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback): 8 usages
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController): 8 usages
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 8 usages
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 8 usages
MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService): 8 usages
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 8 usages
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder): 8 usages
TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException): 8 usages
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 7 usages
ArrayList (java.util.ArrayList): 7 usages
HashMap (java.util.HashMap): 7 usages
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch): 7 usages