Example 11 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class MetaDataClient method addIndexesFromParentTable.

/**
 * Fault in the parent table to the cache and add any indexes it has to the indexes
 * of the table whose cache we just updated.
 * TODO: combine this round trip with the one that updates the cache for the child table.
 * @param result the result from updating the cache for the current table
 * @param resolvedTimestamp timestamp at which the child table was resolved
 * @return true if the PTable contained by result was modified and false otherwise
 * @throws SQLException if the physical table cannot be found
 */
private boolean addIndexesFromParentTable(MetaDataMutationResult result, Long resolvedTimestamp) throws SQLException {
    PTable view = result.getTable();
    // If not a view or if a view directly over an HBase table, there's nothing to do
    if (view.getType() != PTableType.VIEW || view.getViewType() == ViewType.MAPPED) {
        return false;
    }
    // a view on a table will not have a parent name but will have a physical table name (which is the parent)
    String parentName = view.getParentName().getString();
    String schemaName = SchemaUtil.getSchemaNameFromFullName(parentName);
    String tableName = SchemaUtil.getTableNameFromFullName(parentName);
    MetaDataMutationResult parentResult = updateCache(connection.getTenantId(), schemaName, tableName, false, resolvedTimestamp);
    PTable parentTable = parentResult.getTable();
    if (parentTable == null) {
        throw new TableNotFoundException(schemaName, tableName);
    }
    if (!result.wasUpdated() && !parentResult.wasUpdated()) {
        return false;
    }
    List<PTable> parentTableIndexes = parentTable.getIndexes();
    if (parentTableIndexes.isEmpty()) {
        return false;
    }
    // Filter out indexes if column doesn't exist in view
    List<PTable> indexesToAdd = Lists.newArrayListWithExpectedSize(parentTableIndexes.size() + view.getIndexes().size());
    if (result.wasUpdated()) {
        // Table from server never contains inherited indexes
        indexesToAdd.addAll(view.getIndexes());
    } else {
        // Only add original ones, as inherited ones may have changed
        for (PTable index : view.getIndexes()) {
            // Original indexes will not have a view statement while inherited ones will
            if (index.getViewStatement() == null) {
                indexesToAdd.add(index);
            }
        }
    }
    for (PTable index : parentTableIndexes) {
        boolean containsAllReqdCols = true;
        // Ensure that all columns required to create index exist in the view too,
        // since view columns may be removed.
        IndexMaintainer indexMaintainer = index.getIndexMaintainer(parentTable, connection);
        // Check that the columns required for the index pk are present in the view
        Set<Pair<String, String>> indexedColInfos = indexMaintainer.getIndexedColumnInfo();
        for (Pair<String, String> colInfo : indexedColInfos) {
            try {
                String colFamily = colInfo.getFirst();
                String colName = colInfo.getSecond();
                if (colFamily == null) {
                    view.getColumnForColumnName(colName);
                } else {
                    view.getColumnFamily(colFamily).getPColumnForColumnName(colName);
                }
            } catch (ColumnNotFoundException e) {
                containsAllReqdCols = false;
                break;
            }
        }
        // Ensure that constant columns (i.e. columns matched in the view WHERE clause)
        // all exist in the index on the parent table.
        for (PColumn col : view.getColumns()) {
            if (col.getViewConstant() != null) {
                try {
                    // It'd be possible to use a local index that doesn't have all view constants,
                    // but the WHERE clause for the view statement (which is added to the index below)
                    // would fail to compile.
                    String indexColumnName = IndexUtil.getIndexColumnName(col);
                    index.getColumnForColumnName(indexColumnName);
                } catch (ColumnNotFoundException e1) {
                    PColumn indexCol = null;
                    try {
                        String cf = col.getFamilyName() != null ? col.getFamilyName().getString() : null;
                        String colName = col.getName().getString();
                        if (cf != null) {
                            indexCol = parentTable.getColumnFamily(cf).getPColumnForColumnName(colName);
                        } else {
                            indexCol = parentTable.getColumnForColumnName(colName);
                        }
                    } catch (ColumnNotFoundException e2) {
                        // Ignore this index and continue with others
                        containsAllReqdCols = false;
                        break;
                    }
                    if (indexCol.getViewConstant() == null || Bytes.compareTo(indexCol.getViewConstant(), col.getViewConstant()) != 0) {
                        containsAllReqdCols = false;
                        break;
                    }
                }
            }
        }
        if (containsAllReqdCols) {
            // Tack on view statement to index to get proper filtering for view
            String viewStatement = IndexUtil.rewriteViewStatement(connection, index, parentTable, view.getViewStatement());
            PName modifiedIndexName = PNameFactory.newName(index.getSchemaName().getString() + QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR + index.getName().getString() + QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR + view.getName().getString());
            // add the index table with a new name so that it does not conflict with the existing index table
            // also set update cache frequency to never since the renamed index is not present on the server
            indexesToAdd.add(PTableImpl.makePTable(index, modifiedIndexName, viewStatement, Long.MAX_VALUE, view.getTenantId()));
        }
    }
    PTable allIndexesTable = PTableImpl.makePTable(view, view.getTimeStamp(), indexesToAdd);
    result.setTable(allIndexesTable);
    return true;
}
Also used : IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Pair(org.apache.hadoop.hbase.util.Pair)
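
For context, a hedged sketch of how this helper is driven: Example 14 below shows the real call site inside updateCache, and this snippet just condenses it (locals such as tenantId, schemaName, viewName, and resolvedTime are assumed to be in scope):

MetaDataMutationResult result = updateCache(tenantId, schemaName, viewName, false, null);
long resolvedTime = TransactionUtil.getResolvedTime(connection, result);
if (addIndexesFromParentTable(result, null)) {
    // result now holds a PTable whose index list includes the parent's indexes,
    // renamed so they cannot collide with the view's own indexes
    connection.addTable(result.getTable(), resolvedTime);
} else {
    // nothing inherited; just refresh the resolved timestamp on the cached table
    connection.updateResolvedTimestamp(result.getTable(), resolvedTime);
}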

Example 12 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class MetaDataClient method dropColumn.

public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        PName tenantId = connection.getTenantId();
        TableName tableNameNode = statement.getTable().getName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        boolean retried = false;
        while (true) {
            final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
            TableRef tableRef = resolver.getTables().get(0);
            PTable table = tableRef.getTable();
            List<ColumnName> columnRefs = statement.getColumnRefs();
            if (columnRefs == null) {
                columnRefs = Lists.newArrayListWithCapacity(0);
            }
            List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
            List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
            List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
            List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
            for (ColumnName column : columnRefs) {
                ColumnRef columnRef = null;
                try {
                    columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
                } catch (ColumnNotFoundException e) {
                    if (statement.ifExists()) {
                        return new MutationState(0, 0, connection);
                    }
                    throw e;
                }
                PColumn columnToDrop = columnRef.getColumn();
                tableColumnsToDrop.add(columnToDrop);
                if (SchemaUtil.isPKColumn(columnToDrop)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
                } else if (table.isAppendOnlySchema()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
                }
                columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
            }
            dropColumnMutations(table, tableColumnsToDrop);
            boolean removedIndexTableOrColumn = false;
            Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
            for (PTable index : table.getIndexes()) {
                IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
                // get the covered columns 
                List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
                Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
                Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
                for (PColumn columnToDrop : tableColumnsToDrop) {
                    Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
                    ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
                    boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
                    if (isColumnIndexed) {
                        if (index.getViewIndexId() == null) {
                            indexesToDrop.add(new TableRef(index));
                        }
                        connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
                        removedIndexTableOrColumn = true;
                    } else if (coveredCols.contains(colDropRef)) {
                        String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
                        PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
                        indexColumnsToDrop.add(indexColumn);
                        // add the index column to be dropped so that we actually delete the column values
                        columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
                        removedIndexTableOrColumn = true;
                    }
                }
                if (!indexColumnsToDrop.isEmpty()) {
                    long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
                    dropColumnMutations(index, indexColumnsToDrop);
                    long clientTimestamp = MutationState.getMutationTimestamp(timeStamp, connection.getSCN());
                    connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
                }
            }
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            // Force table header to be first in list
            Collections.reverse(tableMetaData);
            /*
                 * Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
                 * in a column family that was the empty column family. In that case, we have to pick another one. If there are no other
                 * ones, then we need to create our default empty column family. Note that this may no longer be necessary once we
                 * support declaring what the empty column family is on a table, as:
                 * - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
                 * - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating
                 *    the empty column family name on the PTable.
                 */
            for (ColumnRef columnRefToDrop : columnsToDrop) {
                PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
                byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
                if (emptyCF != null) {
                    try {
                        tableContainingColumnToDrop.getColumnFamily(emptyCF);
                    } catch (ColumnFamilyNotFoundException e) {
                        // Only if it's not already a column family do we need to ensure it's created
                        Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
                        family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
                        // Just use a Put without any key values as the Mutation, as addColumn will treat this specially
                        // TODO: pass through schema name and table name instead to these methods as it's cleaner
                        byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                        if (tenantIdBytes == null)
                            tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
                        connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
                    }
                }
            }
            MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_NOT_FOUND) {
                    addTableToCache(result);
                    if (!statement.ifExists()) {
                        throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
                    }
                    return new MutationState(0, 0, connection);
                }
                // If we've done any index metadata updates, don't bother trying to update the
                // client-side cache as it would be too painful. Just let it pull it over from
                // the server when needed.
                if (tableColumnsToDrop.size() > 0) {
                    if (removedIndexTableOrColumn)
                        connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                    else
                        connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
                }
                // If we have a VIEW, then only delete the metadata, and leave the table data alone
                if (table.getType() != PTableType.VIEW) {
                    MutationState state = null;
                    connection.setAutoCommit(true);
                    Long scn = connection.getSCN();
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    PostDDLCompiler compiler = new PostDDLCompiler(connection);
                    boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                    // if the index is a local index or view index it uses a shared physical table
                    // so we need to issue deletes markers for all the rows of the index
                    final List<TableRef> tableRefsToDrop = Lists.newArrayList();
                    Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
                    if (result.getSharedTablesToDelete() != null) {
                        for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
                            PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
                            TableRef indexTableRef = new TableRef(viewIndexTable);
                            PName indexTableTenantId = sharedTableState.getTenantId();
                            if (indexTableTenantId == null) {
                                tableRefsToDrop.add(indexTableRef);
                            } else {
                                if (!tenantIdTableRefMap.containsKey(indexTableTenantId)) {
                                    tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
                                }
                                tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
                            }
                        }
                    }
                    // if dropMetaData is false, delete the rows of the dropped indexes (if it was true
                    // they would have been dropped in ConnectionQueryServices.dropColumn)
                    if (!dropMetaData) {
                        tableRefsToDrop.addAll(indexesToDrop);
                    }
                    // Drop any index tables that had the dropped column in the PK
                    state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
                    // Drop any tenant-specific indexes
                    if (!tenantIdTableRefMap.isEmpty()) {
                        for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
                            String indexTenantId = entry.getKey();
                            Properties props = new Properties(connection.getClientInfo());
                            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
                            try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
                                PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
                                state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
                            }
                        }
                    }
                    // Only update the empty key value for mutable tables, or immutable tables
                    // using the ONE_CELL_PER_COLUMN storage scheme.
                    // See https://issues.apache.org/jira/browse/PHOENIX-3605
                    if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                        // Update empty key value column if necessary
                        for (ColumnRef droppedColumnRef : columnsToDrop) {
                            // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
                            // to get any updates from the region server.
                            // TODO: move this into PostDDLCompiler
                            // TODO: consider filtering mutable indexes here, but then the issue is that
                            // we'd need to force an update of the data row empty key value if a mutable
                            // secondary index is changing its empty key value family.
                            droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
                            TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
                            PColumn droppedColumn = droppedColumnRef.getColumn();
                            MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
                            state = connection.getQueryServices().updateData(plan);
                        }
                    }
                    // Return the last MutationState
                    return state;
                }
                return new MutationState(0, 0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                table = connection.getTable(new PTableKey(tenantId, fullTableName));
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) Properties(java.util.Properties) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) ArrayList(java.util.ArrayList) List(java.util.List) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) Pair(org.apache.hadoop.hbase.util.Pair) MutationPlan(org.apache.phoenix.compile.MutationPlan) Put(org.apache.hadoop.hbase.client.Put) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) TableName(org.apache.phoenix.parse.TableName) ColumnName(org.apache.phoenix.parse.ColumnName) SharedTableState(org.apache.phoenix.coprocessor.MetaDataProtocol.SharedTableState) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
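
As a usage sketch (the JDBC URL, schema, table, and column names are illustrative), this code path is reached through an ordinary ALTER TABLE ... DROP COLUMN statement:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropColumnExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS makes the drop a no-op when the column is already gone,
            // matching the statement.ifExists() checks in dropColumn above
            stmt.execute("ALTER TABLE MY_SCHEMA.MY_TABLE DROP COLUMN IF EXISTS V2");
        }
    }
}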

Example 13 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class MetaDataClient method createSchema.

public MutationState createSchema(CreateSchemaStatement create) throws SQLException {
    boolean wasAutoCommit = connection.getAutoCommit();
    connection.rollback();
    try {
        if (!SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CREATE_SCHEMA_NOT_ALLOWED).setSchemaName(create.getSchemaName()).build().buildException();
        }
        boolean isIfNotExists = create.isIfNotExists();
        validateSchema(create.getSchemaName());
        PSchema schema = new PSchema(create.getSchemaName());
        connection.setAutoCommit(false);
        List<Mutation> schemaMutations;
        try (PreparedStatement schemaUpsert = connection.prepareStatement(CREATE_SCHEMA)) {
            schemaUpsert.setString(1, schema.getSchemaName());
            schemaUpsert.setString(2, MetaDataClient.EMPTY_TABLE);
            schemaUpsert.execute();
            schemaMutations = connection.getMutationState().toMutations(null).next().getSecond();
            connection.rollback();
        }
        MetaDataMutationResult result = connection.getQueryServices().createSchema(schemaMutations, schema.getSchemaName());
        MutationCode code = result.getMutationCode();
        switch(code) {
            case SCHEMA_ALREADY_EXISTS:
                if (result.getSchema() != null) {
                    addSchemaToCache(result);
                }
                if (!isIfNotExists) {
                    throw new SchemaAlreadyExistsException(schema.getSchemaName());
                }
                break;
            case NEWER_SCHEMA_FOUND:
                throw new NewerSchemaAlreadyExistsException(schema.getSchemaName());
            default:
                result = new MetaDataMutationResult(code, schema, result.getMutationTime());
                addSchemaToCache(result);
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
    return new MutationState(0, 0, connection);
}
Also used : MutationState(org.apache.phoenix.execute.MutationState) PSchema(org.apache.phoenix.parse.PSchema) PreparedStatement(java.sql.PreparedStatement) Mutation(org.apache.hadoop.hbase.client.Mutation) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode)
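
A hedged usage sketch (URL and schema name are illustrative; note that namespace mapping must also be enabled in hbase-site.xml on the server side for this to succeed):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Properties;

public class CreateSchemaExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // createSchema throws CREATE_SCHEMA_NOT_ALLOWED unless namespace mapping is enabled
        props.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true");
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
             Statement stmt = conn.createStatement()) {
            // IF NOT EXISTS maps to the SCHEMA_ALREADY_EXISTS branch above
            stmt.execute("CREATE SCHEMA IF NOT EXISTS MY_SCHEMA");
        }
    }
}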

Example 14 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class MetaDataClient method updateCache.

private MetaDataMutationResult updateCache(PName origTenantId, String schemaName, String tableName, boolean alwaysHitServer, Long resolvedTimestamp) throws SQLException {
    // TODO: pass byte[] here
    boolean systemTable = SYSTEM_CATALOG_SCHEMA.equals(schemaName);
    // System tables must always have a null tenantId
    PName tenantId = systemTable ? null : origTenantId;
    PTable table = null;
    PTableRef tableRef = null;
    String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    long tableTimestamp = HConstants.LATEST_TIMESTAMP;
    long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP;
    try {
        tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName));
        table = tableRef.getTable();
        tableTimestamp = table.getTimeStamp();
        tableResolvedTimestamp = tableRef.getResolvedTimeStamp();
    } catch (TableNotFoundException e) {
        // Ignore: the table is simply not in the client-side cache yet
    }
    boolean defaultTransactional = connection.getQueryServices().getProps().getBoolean(QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB, QueryServicesOptions.DEFAULT_TRANSACTIONAL);
    // start a txn if all tables are transactional by default or if we found the table in the cache and it is transactional
    // TODO if system tables become transactional remove the check
    boolean isTransactional = defaultTransactional || (table != null && table.isTransactional());
    if (!systemTable && isTransactional && !connection.getMutationState().isTransactionStarted()) {
        connection.getMutationState().startTransaction();
    }
    resolvedTimestamp = resolvedTimestamp == null ? TransactionUtil.getResolvedTimestamp(connection, isTransactional, HConstants.LATEST_TIMESTAMP) : resolvedTimestamp;
    // Do not make an RPC to getTable if:
    // 1. the table is a system table, or
    // 2. the table was already resolved as of that timestamp, or
    // 3. the cached entry is younger than the table's UPDATE_CACHE_FREQUENCY
    if (table != null && !alwaysHitServer && (systemTable || resolvedTimestamp == tableResolvedTimestamp || connection.getMetaDataCache().getAge(tableRef) < table.getUpdateCacheFrequency())) {
        return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, QueryConstants.UNSET_TIMESTAMP, table);
    }
    int maxTryCount = tenantId == null ? 1 : 2;
    int tryCount = 0;
    MetaDataMutationResult result;
    do {
        final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
        final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
        ConnectionQueryServices queryServices = connection.getQueryServices();
        result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, resolvedTimestamp);
        // if the table was assumed to be transactional, but is actually not transactional then re-resolve as of the right timestamp (and vice versa)
        if (table == null && result.getTable() != null && result.getTable().isTransactional() != isTransactional) {
            result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, TransactionUtil.getResolvedTimestamp(connection, result.getTable().isTransactional(), HConstants.LATEST_TIMESTAMP));
        }
        if (SYSTEM_CATALOG_SCHEMA.equals(schemaName)) {
            return result;
        }
        MutationCode code = result.getMutationCode();
        PTable resultTable = result.getTable();
        // We found an updated table, so update our cache
        if (resultTable != null) {
            // Cache table, even if multi-tenant table found for null tenant_id
            // These may be accessed by tenant-specific connections, as the
            // tenant_id will always be added to mask other tenants data.
            // Otherwise, a tenant would be required to create a VIEW first
            // which is not really necessary unless you want to filter or add
            // columns
            addTableToCache(result);
            return result;
        } else {
            // No updated table was returned. If we have a cached copy, either keep using it
            // (TABLE_ALREADY_EXISTS) or evict it once the table is not found on the last try;
            // otherwise fall through and query the
            // server again.
            if (table != null) {
                // Ensures that table in result is set to table found in our cache.
                if (code == MutationCode.TABLE_ALREADY_EXISTS) {
                    result.setTable(table);
                    // Although this table is up-to-date, the parent table may not be.
                    // In this case, we update the parent table which may in turn pull
                    // in indexes to add to this table.
                    long resolvedTime = TransactionUtil.getResolvedTime(connection, result);
                    if (addIndexesFromParentTable(result, resolvedTimestamp)) {
                        connection.addTable(result.getTable(), resolvedTime);
                    } else {
                        // if we aren't adding the table, we still need to update the resolved time of the table
                        connection.updateResolvedTimestamp(table, resolvedTime);
                    }
                    return result;
                }
                // If the table was not found at the current timestamp and we have one cached,
                // remove it. Otherwise, we're up to date, so there's nothing to do.
                if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) {
                    connection.removeTable(origTenantId, fullTableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                }
            }
        }
        // Try again with global tenantId
        tenantId = null;
    } while (++tryCount < maxTryCount);
    return result;
}
Also used : IndexKeyConstraint(org.apache.phoenix.parse.IndexKeyConstraint) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) ColumnDefInPkConstraint(org.apache.phoenix.parse.ColumnDefInPkConstraint) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
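
The getAge(tableRef) < table.getUpdateCacheFrequency() short-circuit above is what the UPDATE_CACHE_FREQUENCY table property feeds. A hedged sketch (table name illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class UpdateCacheFrequencyExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // With a 60000 ms frequency, updateCache answers from the client-side cache
            // (TABLE_ALREADY_EXISTS) and skips the getTable RPC until the entry ages out
            stmt.execute("ALTER TABLE MY_TABLE SET UPDATE_CACHE_FREQUENCY = 60000");
        }
    }
}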

Example 15 with MetaDataMutationResult

use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.

the class MetaDataClient method alterIndex.

public MutationState alterIndex(AlterIndexStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        String dataTableName = statement.getTableName();
        String schemaName = statement.getTable().getName().getSchemaName();
        String indexName = statement.getTable().getName().getTableName();
        boolean isAsync = statement.isAsync();
        PIndexState newIndexState = statement.getIndexState();
        if (isAsync && newIndexState != PIndexState.REBUILD) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.ASYNC_NOT_ALLOWED).setMessage(" ASYNC building of index is allowed only with REBUILD index state").setSchemaName(schemaName).setTableName(indexName).build().buildException();
        }
        if (newIndexState == PIndexState.REBUILD) {
            newIndexState = PIndexState.BUILDING;
        }
        connection.setAutoCommit(false);
        // Confirm index table is valid and up-to-date
        TableRef indexRef = FromCompiler.getResolver(statement, connection).getTables().get(0);
        PreparedStatement tableUpsert = null;
        try {
            if (newIndexState == PIndexState.ACTIVE) {
                tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE);
            } else {
                tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE);
            }
            tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
            tableUpsert.setString(2, schemaName);
            tableUpsert.setString(3, indexName);
            tableUpsert.setString(4, newIndexState.getSerializedValue());
            tableUpsert.setLong(5, 0);
            if (newIndexState == PIndexState.ACTIVE) {
                tableUpsert.setLong(6, 0);
            }
            tableUpsert.execute();
        } finally {
            if (tableUpsert != null) {
                tableUpsert.close();
            }
        }
        Long timeStamp = indexRef.getTable().isTransactional() ? indexRef.getTimeStamp() : null;
        List<Mutation> tableMetadata = connection.getMutationState().toMutations(timeStamp).next().getSecond();
        connection.rollback();
        MetaDataMutationResult result = connection.getQueryServices().updateIndexState(tableMetadata, dataTableName);
        MutationCode code = result.getMutationCode();
        if (code == MutationCode.TABLE_NOT_FOUND) {
            throw new TableNotFoundException(schemaName, indexName);
        }
        if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION).setMessage(" currentState=" + indexRef.getTable().getIndexState() + ". requestedState=" + newIndexState).setSchemaName(schemaName).setTableName(indexName).build().buildException();
        }
        if (code == MutationCode.TABLE_ALREADY_EXISTS) {
            if (result.getTable() != null) {
                // To accommodate connection-less update of index state
                addTableToCache(result);
                // Set so that we get the table below with the potentially modified rowKeyOrderOptimizable flag set
                indexRef.setTable(result.getTable());
                if (newIndexState == PIndexState.BUILDING && isAsync) {
                    try {
                        tableUpsert = connection.prepareStatement(UPDATE_INDEX_REBUILD_ASYNC_STATE);
                        tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
                        tableUpsert.setString(2, schemaName);
                        tableUpsert.setString(3, indexName);
                        tableUpsert.setLong(4, result.getTable().getTimeStamp());
                        tableUpsert.execute();
                        connection.commit();
                    } finally {
                        if (tableUpsert != null) {
                            tableUpsert.close();
                        }
                    }
                }
            }
        }
        if (newIndexState == PIndexState.BUILDING && !isAsync) {
            PTable index = indexRef.getTable();
            // First delete any existing rows of the index
            Long scn = connection.getSCN();
            long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
            MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
            connection.getQueryServices().updateData(plan);
            NamedTableNode dataTableNode = NamedTableNode.create(null, TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
            // Next rebuild the index
            connection.setAutoCommit(true);
            if (connection.getSCN() != null) {
                return buildIndexAtTimeStamp(index, dataTableNode);
            }
            TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0);
            return buildIndex(index, dataTableRef);
        }
        return new MutationState(1, 1000, connection);
    } catch (TableNotFoundException e) {
        if (!statement.ifExists()) {
            throw e;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : PreparedStatement(java.sql.PreparedStatement) MutationPlan(org.apache.phoenix.compile.MutationPlan) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) Mutation(org.apache.hadoop.hbase.client.Mutation) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
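
A hedged usage sketch (index and table names illustrative); the ASYNC-only-with-REBUILD rule enforced at the top of alterIndex corresponds to the final keyword here:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AlterIndexExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // REBUILD moves the index to the BUILDING state; with ASYNC the rebuild is
            // deferred to the MapReduce IndexTool instead of running on this connection
            stmt.execute("ALTER INDEX MY_IDX ON MY_SCHEMA.MY_TABLE REBUILD ASYNC");
        }
    }
}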

Aggregations

MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 36 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 20 uses
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 15 uses
PLong (org.apache.phoenix.schema.types.PLong): 11 uses
MutationState (org.apache.phoenix.execute.MutationState): 10 uses
PTable (org.apache.phoenix.schema.PTable): 10 uses
PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong): 9 uses
IOException (java.io.IOException): 8 uses
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback): 8 uses
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController): 8 uses
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 8 uses
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 8 uses
MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService): 8 uses
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 8 uses
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder): 8 uses
TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException): 8 uses
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 7 uses
ArrayList (java.util.ArrayList): 7 uses
HashMap (java.util.HashMap): 7 uses
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch): 7 uses