Example 6 with ColumnFamilyNotFoundException

Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.

The class MetaDataEndpointImpl, method dropColumnsFromChildViews.

private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
    List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
    // Isolate the deletes relevant to dropping columns from the base table.
    for (Mutation m : tableMetadata) {
        if (m instanceof Delete) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnDeletesForBaseTable.add((Delete) m);
            }
        }
    }
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short numColsDeleted = 0;
        byte[] viewTenantId = viewInfo.getTenantId();
        byte[] viewSchemaName = viewInfo.getSchemaName();
        byte[] viewName = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
        // lock the rows corresponding to views so that no other thread can modify the view
        // meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        int numCols = view.getColumns().size();
        int minDroppedColOrdinalPos = Integer.MAX_VALUE;
        for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
            // ignore since it means that the column family is not present in the view
            } catch (ColumnNotFoundException e) {
            // ignore since it means the column is not present in the view
            }
            // if the column being dropped is referenced in the view's WHERE clause, we cannot drop it
            if (existingViewColumn != null && view.getViewStatement() != null) {
                ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
                PhoenixConnection conn = null;
                try {
                    conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                } catch (ClassNotFoundException e) {
                    // ignore: thrown by getConnectionOnServer only if the Phoenix driver class cannot be loaded
                }
                PhoenixStatement statement = new PhoenixStatement(conn);
                TableRef baseTableRef = new TableRef(basePhysicalTable);
                ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
                StatementContext context = new StatementContext(statement, columnResolver);
                Expression whereExpression = WhereCompiler.compile(context, viewWhere);
                Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
                ColumnFinder columnFinder = new ColumnFinder(colExpression);
                whereExpression.accept(columnFinder);
                if (columnFinder.getColumnFound()) {
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
                }
            }
            minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
            if (existingViewColumn != null) {
                --numColsDeleted;
                if (ordinalPositionList.size() == 0) {
                    ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
                    for (PColumn col : view.getColumns()) {
                        ordinalPositionList.addColumn(getColumnKey(viewKey, col));
                    }
                }
                ordinalPositionList.dropColumn(columnKey);
                Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
                mutationsForAddingColumnsToViews.add(viewColumnDelete);
                // drop any view indexes that need this column
                dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
            }
        }
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
    }
    return null;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ArrayList(java.util.ArrayList) ByteString(com.google.protobuf.ByteString) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) StatementContext(org.apache.phoenix.compile.StatementContext) PColumn(org.apache.phoenix.schema.PColumn) ParseNode(org.apache.phoenix.parse.ParseNode) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) SQLParser(org.apache.phoenix.parse.SQLParser) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) Mutation(org.apache.hadoop.hbase.client.Mutation) ColumnRef(org.apache.phoenix.schema.ColumnRef) TableRef(org.apache.phoenix.schema.TableRef)
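
A minimal, self-contained sketch of the lookup-and-catch pattern at the heart of this example. The class and method names (ViewColumnLookup, resolveViewColumn) are hypothetical, not part of Phoenix; the PTable calls are the same ones the method above uses, and the two not-found exceptions are simply translated into a null result meaning "this column does not exist in the view".

import java.sql.SQLException;

import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

final class ViewColumnLookup {

    // Resolve a column on a view: PK columns have no column family and are looked up by name;
    // non-PK columns are looked up through their column family. A missing family or a missing
    // column is reported as null rather than as an exception.
    static PColumn resolveViewColumn(PTable view, String columnFamily, String columnName)
            throws SQLException {
        try {
            return columnFamily == null
                    ? view.getColumnForColumnName(columnName)
                    : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
        } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
            return null;
        }
    }
}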

Example 7 with ColumnFamilyNotFoundException

Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.

The class MetaDataEndpointImpl, method addColumnsAndTablePropertiesToChildViews.

private MetaDataMutationResult addColumnsAndTablePropertiesToChildViews(PTable basePhysicalTable, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, Region region, List<RowLock> locks) throws IOException, SQLException {
    List<PutWithOrdinalPosition> columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size());
    Map<TableProperty, Cell> tablePropertyCellMap = Maps.newHashMapWithExpectedSize(tableMetadata.size());
    // Isolate the puts relevant to adding columns. Also figure out what kind of columns are being added.
    for (Mutation m : tableMetadata) {
        if (m instanceof Put) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            // check if this put is for adding a column
            if (pkCount > COLUMN_NAME_INDEX && rkmd[COLUMN_NAME_INDEX] != null && rkmd[COLUMN_NAME_INDEX].length > 0 && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnPutsForBaseTable.add(new PutWithOrdinalPosition((Put) m, getInteger((Put) m, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES)));
            } else // check if the put is for a table property
            if (pkCount <= COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                for (Cell cell : m.getFamilyCellMap().get(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)) {
                    for (TableProperty tableProp : TableProperty.values()) {
                        byte[] propNameBytes = Bytes.toBytes(tableProp.getPropertyName());
                        if (Bytes.compareTo(propNameBytes, 0, propNameBytes.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) == 0 && tableProp.isValidOnView() && tableProp.isMutable()) {
                            Cell tablePropCell = CellUtil.createCell(cell.getRow(), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell));
                            tablePropertyCellMap.put(tableProp, tablePropCell);
                        }
                    }
                }
            }
        }
    }
    // Sort the puts by ordinal position 
    Collections.sort(columnPutsForBaseTable);
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short deltaNumPkColsSoFar = 0;
        short columnsAddedToView = 0;
        short columnsAddedToBaseTable = 0;
        byte[] tenantId = viewInfo.getTenantId();
        byte[] schema = viewInfo.getSchemaName();
        byte[] table = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(tenantId, schema, table);
        // lock the rows corresponding to views so that no other thread can modify the view meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        List<PColumn> viewPkCols = new ArrayList<>(view.getPKColumns());
        boolean addingExistingPkCol = false;
        int numCols = view.getColumns().size();
        // add the new columns to the child view
        for (PutWithOrdinalPosition p : columnPutsForBaseTable) {
            Put baseTableColumnPut = p.put;
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(baseTableColumnPut.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
            // ignore since it means that the column family is not present for the column to be added.
            } catch (ColumnNotFoundException e) {
            // ignore since it means the column is not present in the view
            }
            boolean isPkCol = columnFamily == null;
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            if (existingViewColumn != null) {
                MetaDataMutationResult result = validateColumnForAddToBaseTable(existingViewColumn, baseTableColumnPut, basePhysicalTable, isPkCol, view);
                if (result != null) {
                    return result;
                }
                if (isPkCol) {
                    viewPkCols.remove(existingViewColumn);
                    addingExistingPkCol = true;
                }
                /*
                     * For views that are not diverged, we need to make sure that the existing columns
                     * have the same ordinal position as in the base table. This is important because
                     * we rely on the ordinal position of the column to figure out whether dropping a 
                     * column from the view will end up diverging the view from the base table.
                     * 
                     * For already diverged views, we don't care about the ordinal position of the existing column.
                     */
                if (!isDivergedView(view)) {
                    int newOrdinalPosition = p.ordinalPosition;
                    // Check if the ordinal position of the column was getting updated from previous add column
                    // mutations.
                    int existingOrdinalPos = ordinalPositionList.getOrdinalPositionOfColumn(columnKey);
                    if (ordinalPositionList.size() == 0) {
                        /*
                             * No ordinal positions to be updated are in the list. In that case, check whether the
                             * existing ordinal position of the column is different from its new ordinal position.
                             * If yes, then initialize the ordinal position list with this column's ordinal position
                             * as the offset.
                             */
                        existingOrdinalPos = getOrdinalPosition(view, existingViewColumn);
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.setOffset(newOrdinalPosition);
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                            for (PColumn col : view.getColumns()) {
                                int ordinalPos = getOrdinalPosition(view, col);
                                if (ordinalPos >= newOrdinalPosition) {
                                    if (ordinalPos == existingOrdinalPos) {
                                        /*
                                             * No need to update ordinal positions of columns beyond the existing column's 
                                             * old ordinal position.
                                             */
                                        break;
                                    }
                                    // increment ordinal position of columns occurring after this column by 1
                                    int updatedPos = ordinalPos + 1;
                                    ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                                }
                            }
                        }
                    } else {
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        }
                    }
                    columnsAddedToBaseTable++;
                }
            } else {
                // The column doesn't exist in the view.
                Put viewColumnPut = new Put(columnKey, clientTimeStamp);
                for (Cell cell : baseTableColumnPut.getFamilyCellMap().values().iterator().next()) {
                    viewColumnPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell)));
                }
                if (isDivergedView(view)) {
                    if (isPkCol) {
                        /* 
                             * Only pk cols of the base table are added to the diverged views. These pk 
                             * cols are added at the end.
                             */
                        int lastOrdinalPos = getOrdinalPosition(view, view.getColumns().get(numCols - 1));
                        int newPosition = ++lastOrdinalPos;
                        byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
                        PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0);
                        viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
                        mutationsForAddingColumnsToViews.add(viewColumnPut);
                    } else {
                        // move on to the next column
                        continue;
                    }
                } else {
                    int newOrdinalPosition = p.ordinalPosition;
                    /*
                         * For a non-diverged view, we need to make sure that the base table column
                         * is added at the right position.
                         */
                    if (ordinalPositionList.size() == 0) {
                        ordinalPositionList.setOffset(newOrdinalPosition);
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        for (PColumn col : view.getColumns()) {
                            int ordinalPos = getOrdinalPosition(view, col);
                            if (ordinalPos >= newOrdinalPosition) {
                                // increment ordinal position of columns by 1
                                int updatedPos = ordinalPos + 1;
                                ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                            }
                        }
                    } else {
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                    }
                    mutationsForAddingColumnsToViews.add(viewColumnPut);
                }
                if (isPkCol) {
                    deltaNumPkColsSoFar++;
                    // Set the key sequence for the pk column to be added
                    short currentKeySeq = SchemaUtil.getMaxKeySeq(view);
                    short newKeySeq = (short) (currentKeySeq + deltaNumPkColsSoFar);
                    byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
                    PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
                    viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
                    addMutationsForAddingPkColsToViewIndexes(mutationsForAddingColumnsToViews, clientTimeStamp, view, deltaNumPkColsSoFar, columnName, viewColumnPut);
                }
                columnsAddedToView++;
                columnsAddedToBaseTable++;
            }
        }
        /*
             * Allow adding pk columns to the base table only if: 1) all the view pk columns are exactly the same as
             * the base table pk columns, or 2) we are adding all of the existing view pk columns to the base table.
             */
        if (addingExistingPkCol && !viewPkCols.equals(basePhysicalTable.getPKColumns())) {
            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
        }
        addViewIndexesHeaderRowMutations(mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, view, deltaNumPkColsSoFar);
        // set table properties in child view
        if (!tablePropertyCellMap.isEmpty()) {
            Put viewHeaderRowPut = new Put(viewKey, clientTimeStamp);
            for (TableProperty tableProp : TableProperty.values()) {
                Cell tablePropertyCell = tablePropertyCellMap.get(tableProp);
                if (tablePropertyCell != null) {
                    // set this property on the view if it is not mutable on a view (so it always matches the base table),
                    // or if it is mutable on a view and the property value is the same as the base table property (which means it wasn't changed on the view)
                    if (!tableProp.isMutableOnView() || tableProp.getPTableValue(view).equals(tableProp.getPTableValue(basePhysicalTable))) {
                        viewHeaderRowPut.add(CellUtil.createCell(viewKey, CellUtil.cloneFamily(tablePropertyCell), CellUtil.cloneQualifier(tablePropertyCell), clientTimeStamp, tablePropertyCell.getTypeByte(), CellUtil.cloneValue(tablePropertyCell)));
                    }
                }
            }
            byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
            PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
            viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
            // invalidate the view so that it is removed from the cache
            invalidateList.add(new ImmutableBytesPtr(viewKey));
            mutationsForAddingColumnsToViews.add(viewHeaderRowPut);
        }
        /*
             * Increment the sequence number by 1 if:
             * 1) For a diverged view, there were columns (pk columns) added to the view.
             * 2) For a non-diverged view if the base column count changed.
             */
        boolean changeSequenceNumber = (isDivergedView(view) && columnsAddedToView > 0) || (!isDivergedView(view) && columnsAddedToBaseTable > 0);
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, columnsAddedToView, columnsAddedToBaseTable, viewKey, view, ordinalPositionList, numCols, changeSequenceNumber);
    }
    return null;
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) ByteString(com.google.protobuf.ByteString) Put(org.apache.hadoop.hbase.client.Put) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PTable(org.apache.phoenix.schema.PTable) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) PColumn(org.apache.phoenix.schema.PColumn) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) TableProperty(org.apache.phoenix.schema.TableProperty) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)
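
Most of the bookkeeping above keeps ordinal positions consistent when a base-table column is spliced into a non-diverged view: every existing view column at or beyond the new column's position shifts down by one. ColumnOrdinalPositionUpdateList is a private helper inside MetaDataEndpointImpl, so the following is only a hypothetical, stand-alone illustration of that shifting rule, not Phoenix code.

import java.util.LinkedHashMap;
import java.util.Map;

final class OrdinalShiftSketch {

    // Given the current column-name -> ordinal-position map of a view, compute the new positions
    // after inserting newColumn at newOrdinalPosition: positions below the insertion point are
    // untouched, positions at or beyond it are pushed down by one.
    static Map<String, Integer> insertColumn(Map<String, Integer> currentPositions,
                                             String newColumn, int newOrdinalPosition) {
        Map<String, Integer> updated = new LinkedHashMap<>();
        updated.put(newColumn, newOrdinalPosition);
        for (Map.Entry<String, Integer> entry : currentPositions.entrySet()) {
            int pos = entry.getValue();
            updated.put(entry.getKey(), pos >= newOrdinalPosition ? pos + 1 : pos);
        }
        return updated;
    }
}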

Example 8 with ColumnFamilyNotFoundException

Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.

The class ProjectionCompiler, method projectIndexColumnFamily.

private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
    PTable index = tableRef.getTable();
    PhoenixConnection conn = context.getConnection();
    String tableName = index.getParentName().getString();
    PTable table = conn.getTable(new PTableKey(conn.getTenantId(), tableName));
    PColumnFamily pfamily = table.getColumnFamily(cfName);
    for (PColumn column : pfamily.getColumns()) {
        String indexColName = IndexUtil.getIndexColumnName(column);
        PColumn indexColumn = null;
        ColumnRef ref = null;
        String indexColumnFamily = null;
        try {
            indexColumn = index.getColumnForColumnName(indexColName);
            ref = new ColumnRef(tableRef, indexColumn.getPosition());
            indexColumnFamily = indexColumn.getFamilyName() == null ? null : indexColumn.getFamilyName().getString();
        } catch (ColumnNotFoundException e) {
            if (index.getIndexType() == IndexType.LOCAL) {
                try {
                    ref = new LocalIndexDataColumnRef(context, indexColName);
                    indexColumn = ref.getColumn();
                    indexColumnFamily = indexColumn.getFamilyName() == null ? null : (index.getIndexType() == IndexType.LOCAL ? IndexUtil.getLocalIndexColumnFamily(indexColumn.getFamilyName().getString()) : indexColumn.getFamilyName().getString());
                } catch (ColumnFamilyNotFoundException c) {
                    throw e;
                }
            } else {
                throw e;
            }
        }
        if (resolveColumn) {
            ref = context.getResolver().resolveColumn(index.getTableName().getString(), indexColumnFamily, indexColName);
        }
        Expression expression = ref.newColumnExpression();
        projectedExpressions.add(expression);
        String colName = column.getName().toString();
        boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
        projectedColumns.add(new ExpressionProjector(colName, tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive));
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) PTable(org.apache.phoenix.schema.PTable) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) PColumn(org.apache.phoenix.schema.PColumn) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) BaseTerminalExpression(org.apache.phoenix.expression.BaseTerminalExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ColumnRef(org.apache.phoenix.schema.ColumnRef) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) PTableKey(org.apache.phoenix.schema.PTableKey)
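
The essence of this example is the two-level fallback when a data-table column has no direct counterpart on the index: for local indexes the projection can still reference the data-table column through a LocalIndexDataColumnRef, and only if that also fails is the original ColumnNotFoundException rethrown. Below is a hypothetical condensation of that fallback (the class and method names are invented for illustration); it uses only the Phoenix calls that appear in the example.

import java.sql.SQLException;

import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.LocalIndexDataColumnRef;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.TableRef;

final class IndexColumnRefSketch {

    // Resolve the reference used to project an index column: prefer the column on the index table
    // itself; for a local index, fall back to the data-table column; otherwise (or if the fallback
    // also fails) surface the original not-found exception.
    static ColumnRef resolve(StatementContext context, TableRef indexTableRef, String indexColName)
            throws SQLException {
        PTable index = indexTableRef.getTable();
        try {
            return new ColumnRef(indexTableRef, index.getColumnForColumnName(indexColName).getPosition());
        } catch (ColumnNotFoundException e) {
            if (index.getIndexType() != IndexType.LOCAL) {
                throw e;
            }
            try {
                return new LocalIndexDataColumnRef(context, indexColName);
            } catch (ColumnFamilyNotFoundException c) {
                throw e;
            }
        }
    }
}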

Example 9 with ColumnFamilyNotFoundException

Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.

The class ProjectionCompiler, method projectAllIndexColumns.

private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns, List<? extends PDatum> targetColumns) throws SQLException {
    ColumnResolver resolver = context.getResolver();
    PTable index = tableRef.getTable();
    int projectedOffset = projectedExpressions.size();
    PhoenixConnection conn = context.getConnection();
    PName tenantId = conn.getTenantId();
    String tableName = index.getParentName().getString();
    PTable dataTable = null;
    try {
        dataTable = conn.getTable(new PTableKey(tenantId, tableName));
    } catch (TableNotFoundException e) {
        if (tenantId != null) {
            // Check with null tenantId
            dataTable = conn.getTable(new PTableKey(null, tableName));
        } else {
            throw e;
        }
    }
    int tableOffset = dataTable.getBucketNum() == null ? 0 : 1;
    int minTablePKOffset = getMinPKOffset(dataTable, tenantId);
    int minIndexPKOffset = getMinPKOffset(index, tenantId);
    if (index.getIndexType() != IndexType.LOCAL) {
        if (index.getColumns().size() - minIndexPKOffset != dataTable.getColumns().size() - minTablePKOffset) {
            // We'll end up not using this by the optimizer, so just throw
            String schemaNameStr = dataTable.getSchemaName() == null ? null : dataTable.getSchemaName().getString();
            String tableNameStr = dataTable.getTableName() == null ? null : dataTable.getTableName().getString();
            throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, WildcardParseNode.INSTANCE.toString());
        }
    }
    for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) {
        PColumn column = dataTable.getColumns().get(i);
        // Skip tenant ID column (which may not be the first column, but is the first PK column)
        if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) {
            tableOffset++;
            continue;
        }
        PColumn tableColumn = dataTable.getColumns().get(i);
        String indexColName = IndexUtil.getIndexColumnName(tableColumn);
        PColumn indexColumn = null;
        ColumnRef ref = null;
        try {
            indexColumn = index.getColumnForColumnName(indexColName);
            ref = new ColumnRef(tableRef, indexColumn.getPosition());
        } catch (ColumnNotFoundException e) {
            if (index.getIndexType() == IndexType.LOCAL) {
                try {
                    ref = new LocalIndexDataColumnRef(context, indexColName);
                    indexColumn = ref.getColumn();
                } catch (ColumnFamilyNotFoundException c) {
                    throw e;
                }
            } else {
                throw e;
            }
        }
        String colName = tableColumn.getName().getString();
        String tableAlias = tableRef.getTableAlias();
        if (resolveColumn) {
            try {
                if (tableAlias != null) {
                    ref = resolver.resolveColumn(null, tableAlias, indexColName);
                } else {
                    String schemaName = index.getSchemaName().getString();
                    ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName);
                }
            } catch (AmbiguousColumnException e) {
                if (indexColumn.getFamilyName() != null) {
                    ref = resolver.resolveColumn(tableAlias != null ? tableAlias : index.getTableName().getString(), indexColumn.getFamilyName().getString(), indexColName);
                } else {
                    throw e;
                }
            }
        }
        Expression expression = ref.newColumnExpression();
        expression = coerceIfNecessary(i - tableOffset + projectedOffset, targetColumns, expression);
        // We do not need to check if the column is a viewConstant, because view constants never
        // appear as a column in an index
        projectedExpressions.add(expression);
        boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
        ExpressionProjector projector = new ExpressionProjector(colName, tableRef.getTableAlias() == null ? dataTable.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive);
        projectedColumns.add(projector);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) PTable(org.apache.phoenix.schema.PTable) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) PColumn(org.apache.phoenix.schema.PColumn) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) BaseTerminalExpression(org.apache.phoenix.expression.BaseTerminalExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) PName(org.apache.phoenix.schema.PName) ColumnRef(org.apache.phoenix.schema.ColumnRef) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException) PTableKey(org.apache.phoenix.schema.PTableKey)
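
One detail specific to this variant is how the parent data table is located for a tenant-specific connection: the lookup is tried under the connection's tenant id first and, on TableNotFoundException, retried with a null tenant id, since global tables are stored without a tenant. A hypothetical helper isolating just that retry (resolveDataTable is not a Phoenix API) could read:

import java.sql.SQLException;

import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.TableNotFoundException;

final class DataTableLookupSketch {

    // Look up the parent table under the connection's tenant id; if the table is not found and a
    // tenant id was supplied, retry with a null tenant id, which is how global tables are keyed.
    static PTable resolveDataTable(PhoenixConnection conn, PName tenantId, String tableName)
            throws SQLException {
        try {
            return conn.getTable(new PTableKey(tenantId, tableName));
        } catch (TableNotFoundException e) {
            if (tenantId == null) {
                throw e;
            }
            return conn.getTable(new PTableKey(null, tableName));
        }
    }
}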

Example 10 with ColumnFamilyNotFoundException

Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.

The class MetaDataEndpointImpl, method addColumn.

@Override
public void addColumn(RpcController controller, final AddColumnRequest request, RpcCallback<MetaDataResponse> done) {
    try {
        List<Mutation> tableMetaData = ProtobufUtil.getMutations(request);
        MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {

            @Override
            public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData, List<Mutation> tableMetaData, Region region, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp) throws IOException, SQLException {
                byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
                byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
                byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
                PTableType type = table.getType();
                byte[] tableHeaderRowKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
                // Size for worst case - all new columns are PK columns
                List<Mutation> mutationsForAddingColumnsToViews = Lists.newArrayListWithExpectedSize(tableMetaData.size() * (1 + table.getIndexes().size()));
                if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
                    TableViewFinder childViewsResult = new TableViewFinder();
                    findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp);
                    if (childViewsResult.hasViews()) {
                        /* 
                             * Disallow if:
                             * 1) The meta-data for child views spans over
                             * more than one region (since the changes cannot be made in a transactional fashion)
                             * 
                             * 2) The base column count is 0 which means that the metadata hasn't been upgraded yet or
                             * the upgrade is currently in progress.
                             * 
                             * 3) If the request is from a client that is older than 4.5 version of phoenix. 
                             * Starting from 4.5, metadata requests have the client version included in them. 
                             * We don't want to allow clients before 4.5 to add a column to the base table if it has views.
                             * 
                             * 4) Trying to switch tenancy of a table that has views
                             */
                        if (!childViewsResult.allViewsInSingleRegion() || table.getBaseColumnCount() == 0 || !request.hasClientVersion() || switchAttribute(table, table.isMultiTenant(), tableMetaData, MULTI_TENANT_BYTES)) {
                            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                        } else {
                            mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getViewInfoList().size() * tableMetaData.size());
                            MetaDataMutationResult mutationResult = addColumnsAndTablePropertiesToChildViews(table, tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp, childViewsResult, region, locks);
                            // return if we were not able to add the column successfully
                            if (mutationResult != null)
                                return mutationResult;
                        }
                    }
                } else if (type == PTableType.VIEW && EncodedColumnsUtil.usesEncodedColumnNames(table)) {
                    /*
                         * When adding a column to a view that uses encoded column name scheme, we
                         * need to modify the CQ counters stored in the view's physical table. So to
                         * make sure clients get the latest PTable, we need to invalidate the cache
                         * entry.
                         */
                    invalidateList.add(new ImmutableBytesPtr(MetaDataUtil.getPhysicalTableRowForView(table)));
                }
                for (Mutation m : tableMetaData) {
                    byte[] key = m.getRow();
                    boolean addingPKColumn = false;
                    int pkCount = getVarChars(key, rowKeyMetaData);
                    if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) {
                        try {
                            if (pkCount > FAMILY_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0) {
                                PColumnFamily family = table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
                                family.getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]);
                            } else if (pkCount > COLUMN_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0) {
                                addingPKColumn = true;
                                table.getPKColumn(new String(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]));
                            } else {
                                continue;
                            }
                            return new MetaDataMutationResult(MutationCode.COLUMN_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), table);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            if (addingPKColumn) {
                                // We may be adding a DESC column, so if the table is already able to be rowKeyOptimized, it should continue to be so.
                                if (table.rowKeyOrderOptimizable()) {
                                    UpgradeUtil.addRowKeyOrderOptimizableCell(mutationsForAddingColumnsToViews, tableHeaderRowKey, clientTimeStamp);
                                } else if (table.getType() == PTableType.VIEW) {
                                    // Don't allow the view PK to diverge from the table PK, as our upgrade code does not handle this.
                                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                                }
                                // Add all indexes to the invalidate list, as they will all be adding the same PK column; no need to lock them since we have the parent table lock at this point.
                                for (PTable index : table.getIndexes()) {
                                    invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(tenantId, index.getSchemaName().getBytes(), index.getTableName().getBytes())));
                                    // We may be adding a DESC column, so if the index is already able to be rowKeyOptimized, it should continue to be so.
                                    if (index.rowKeyOrderOptimizable()) {
                                        byte[] indexHeaderRowKey = SchemaUtil.getTableKey(index.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : index.getTenantId().getBytes(), index.getSchemaName().getBytes(), index.getTableName().getBytes());
                                        UpgradeUtil.addRowKeyOrderOptimizableCell(mutationsForAddingColumnsToViews, indexHeaderRowKey, clientTimeStamp);
                                    }
                                }
                            }
                            continue;
                        }
                    } else if (pkCount == COLUMN_NAME_INDEX && !(Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0)) {
                        // Invalidate any table with mutations
                        // TODO: this likely means we don't need the above logic that
                        // loops through the indexes if adding a PK column, since we'd
                        // always have header rows for those.
                        invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(tenantId, rowKeyMetaData[SCHEMA_NAME_INDEX], rowKeyMetaData[TABLE_NAME_INDEX])));
                    }
                }
                tableMetaData.addAll(mutationsForAddingColumnsToViews);
                return null;
            }
        });
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
        }
    } catch (IOException ioe) {
        ProtobufUtil.setControllerException(controller, ioe);
    }
}
Also used : SQLException(java.sql.SQLException) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTableType(org.apache.phoenix.schema.PTableType) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ByteString(com.google.protobuf.ByteString) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PTable(org.apache.phoenix.schema.PTable) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) Region(org.apache.hadoop.hbase.regionserver.Region) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)
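
Inside updateMutation the two exceptions act purely as an existence probe: successfully resolving the column means the ADD COLUMN request conflicts with an existing column (COLUMN_ALREADY_EXISTS), while either not-found exception means the add can proceed. The following hypothetical probe (columnAlreadyExists is an invented name, not part of MetaDataEndpointImpl) captures just that check using the same PTable calls:

import java.sql.SQLException;

import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.PTable;

final class ColumnExistenceProbe {

    // True means the column (keyed by an optional family name plus a column name) is already
    // present on the table, so the ADD COLUMN mutation must be rejected with COLUMN_ALREADY_EXISTS.
    // A missing family or a missing column means the column can be added.
    static boolean columnAlreadyExists(PTable table, byte[] familyName, byte[] columnName)
            throws SQLException {
        try {
            if (familyName != null && familyName.length > 0) {
                table.getColumnFamily(familyName).getPColumnForColumnNameBytes(columnName);
            } else {
                table.getPKColumn(new String(columnName));
            }
            return true;
        } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
            return false;
        }
    }
}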

Aggregations

ColumnFamilyNotFoundException (org.apache.phoenix.schema.ColumnFamilyNotFoundException)10 ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException)9 PTable (org.apache.phoenix.schema.PTable)7 PColumn (org.apache.phoenix.schema.PColumn)6 PColumnFamily (org.apache.phoenix.schema.PColumnFamily)6 ColumnRef (org.apache.phoenix.schema.ColumnRef)5 ByteString (com.google.protobuf.ByteString)4 ArrayList (java.util.ArrayList)4 Mutation (org.apache.hadoop.hbase.client.Mutation)4 RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)4 SQLException (java.sql.SQLException)3 List (java.util.List)3 Expression (org.apache.phoenix.expression.Expression)3 KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression)3 LiteralExpression (org.apache.phoenix.expression.LiteralExpression)3 ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression)3 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)3 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)3 LocalIndexDataColumnRef (org.apache.phoenix.schema.LocalIndexDataColumnRef)3 PTinyint (org.apache.phoenix.schema.types.PTinyint)3