Example 11 with RowLock

Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.

Class MetaDataEndpointImpl, method acquireLock:

private static RowLock acquireLock(Region region, byte[] key, List<RowLock> locks) throws IOException {
    RowLock rowLock = region.getRowLock(key, false);
    if (rowLock == null) {
        throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
    }
    locks.add(rowLock);
    return rowLock;
}
Also used: IOException(java.io.IOException), DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException), RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)
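
For orientation, here is a minimal hypothetical caller for a helper like this one, showing a single lock being released once the work is done. The method name and placeholder body are invented for illustration; the null check copies the helper above, and passing false to getRowLock requests an exclusive (write) lock in the HBase 1.x Region API (true would request a shared read lock).

private void mutateUnderRowLock(Region region, byte[] key) throws IOException {
    // false = exclusive write lock; the Phoenix helpers on this page always pass false
    RowLock lock = region.getRowLock(key, false);
    if (lock == null) {
        throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
    }
    try {
        // ... read or mutate the locked row here ...
    } finally {
        // release directly when only one lock is held
        lock.release();
    }
}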

Example 12 with RowLock

Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.

Class SequenceRegionObserver, method acquireLock:

private static void acquireLock(Region region, byte[] key, List<RowLock> locks) throws IOException {
    RowLock rowLock = region.getRowLock(key, false);
    if (rowLock == null) {
        throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
    }
    locks.add(rowLock);
}
Also used: IOException(java.io.IOException), RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)
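
Note that both helpers append the acquired lock to a caller-supplied list instead of releasing it themselves. That lets a caller lock several metadata rows and release them all in one finally block, exactly as Example 15 below does with region.releaseRowLocks. A sketch of that calling pattern, with a hypothetical method name and placeholder body:

private void mutateUnderRowLocks(Region region, List<byte[]> keys) throws IOException {
    List<RowLock> locks = Lists.newArrayList();
    try {
        for (byte[] key : keys) {
            // each call appends the acquired RowLock to `locks`
            acquireLock(region, key, locks);
        }
        // ... perform the multi-row metadata update while all rows are locked ...
    } finally {
        // releases every lock in the list, even if the update failed part-way through
        region.releaseRowLocks(locks);
    }
}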

Example 13 with RowLock

Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.

Class MetaDataEndpointImpl, method dropColumn:

@Override
public void dropColumn(RpcController controller, DropColumnRequest request, RpcCallback<MetaDataResponse> done) {
    List<Mutation> tableMetaData = null;
    final List<byte[]> tableNamesToDelete = Lists.newArrayList();
    final List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
    try {
        tableMetaData = ProtobufUtil.getMutations(request);
        MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {

            @Override
            public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData, List<Mutation> tableMetaData, Region region, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp) throws IOException, SQLException {
                byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
                byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
                byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
                boolean deletePKColumn = false;
                List<Mutation> additionalTableMetaData = Lists.newArrayList();
                PTableType type = table.getType();
                if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
                    TableViewFinder childViewsResult = new TableViewFinder();
                    findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp);
                    if (childViewsResult.hasViews()) {
                        MetaDataMutationResult mutationResult = dropColumnsFromChildViews(region, table, locks, tableMetaData, additionalTableMetaData, schemaName, tableName, invalidateList, clientTimeStamp, childViewsResult, tableNamesToDelete, sharedTablesToDelete);
                        // return if we were not able to drop the column successfully
                        if (mutationResult != null)
                            return mutationResult;
                    }
                }
                for (Mutation m : tableMetaData) {
                    if (m instanceof Delete) {
                        byte[] key = m.getRow();
                        int pkCount = getVarChars(key, rowKeyMetaData);
                        if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) {
                            PColumn columnToDelete = null;
                            try {
                                if (pkCount > FAMILY_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0) {
                                    PColumnFamily family = table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
                                    columnToDelete = family.getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]);
                                } else if (pkCount > COLUMN_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0) {
                                    deletePKColumn = true;
                                    columnToDelete = table.getPKColumn(new String(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]));
                                } else {
                                    continue;
                                }
                                if (table.getType() == PTableType.VIEW) {
                                    if (table.getBaseColumnCount() != DIVERGED_VIEW_BASE_COLUMN_COUNT && columnToDelete.getPosition() < table.getBaseColumnCount()) {
                                        /*
                                             * If the column being dropped is inherited from the base table, then the
                                             * view is about to diverge itself from the base table. The consequence of
                                              * this divergence is that any further meta-data changes made to the
                                             * base table will not be propagated to the hierarchy of views where this
                                             * view is the root.
                                             */
                                        byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
                                        Put updateBaseColumnCountPut = new Put(viewKey);
                                        byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
                                        PInteger.INSTANCE.getCodec().encodeInt(DIVERGED_VIEW_BASE_COLUMN_COUNT, baseColumnCountPtr, 0);
                                        updateBaseColumnCountPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, baseColumnCountPtr);
                                        additionalTableMetaData.add(updateBaseColumnCountPut);
                                    }
                                }
                                if (columnToDelete.isViewReferenced()) {
                                    // Disallow deletion of column referenced in WHERE clause of view
                                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                                }
                                // drop any indexes that need the column that is going to be dropped
                                dropIndexes(table, region, invalidateList, locks, clientTimeStamp, schemaName, tableName, additionalTableMetaData, columnToDelete, tableNamesToDelete, sharedTablesToDelete);
                            } catch (ColumnFamilyNotFoundException e) {
                                return new MetaDataMutationResult(MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                            } catch (ColumnNotFoundException e) {
                                return new MetaDataMutationResult(MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                            }
                        }
                    }
                }
                if (deletePKColumn) {
                    if (table.getPKColumns().size() == 1) {
                        return new MetaDataMutationResult(MutationCode.NO_PK_COLUMNS, EnvironmentEdgeManager.currentTimeMillis(), null);
                    }
                }
                tableMetaData.addAll(additionalTableMetaData);
                long currentTime = MetaDataUtil.getClientTimeStamp(tableMetaData);
                return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, null, tableNamesToDelete, sharedTablesToDelete);
            }
        });
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
        }
    } catch (IOException ioe) {
        ProtobufUtil.setControllerException(controller, ioe);
    }
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete), SQLException(java.sql.SQLException), ByteString(com.google.protobuf.ByteString), PTable(org.apache.phoenix.schema.PTable), PColumn(org.apache.phoenix.schema.PColumn), FilterList(org.apache.hadoop.hbase.filter.FilterList), ArrayList(java.util.ArrayList), List(java.util.List), RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock), ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PTableType(org.apache.phoenix.schema.PTableType), IOException(java.io.IOException), DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException), PColumnFamily(org.apache.phoenix.schema.PColumnFamily), Put(org.apache.hadoop.hbase.client.Put), ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException), ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException), Region(org.apache.hadoop.hbase.regionserver.Region), Mutation(org.apache.hadoop.hbase.client.Mutation)
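
The dropColumn mutator above repeatedly splits SYSTEM.CATALOG row keys into their fields via getVarChars, then indexes the result with constants such as SCHEMA_NAME_INDEX and COLUMN_NAME_INDEX. As a rough illustration of what that splitting does (not Phoenix's actual implementation, which may handle edge cases differently): metadata row keys are variable-length byte fields separated by 0x00.

static int splitVarChars(byte[] rowKey, byte[][] fields) {
    // Fills `fields` in row-key order (tenant id, schema, table, column, family)
    // and returns the number of fields found, like the pkCount checks used above.
    int count = 0;
    int start = 0;
    for (int i = 0; i <= rowKey.length && count < fields.length; i++) {
        if (i == rowKey.length || rowKey[i] == 0) { // 0x00 separates fields
            fields[count++] = java.util.Arrays.copyOfRange(rowKey, start, i);
            start = i + 1;
        }
    }
    return count;
}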

Example 14 with RowLock

Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.

Class MetaDataEndpointImpl, method dropColumnsFromChildViews:

private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
    List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
    // Collect the Delete mutations that correspond to the columns being dropped from the base table.
    for (Mutation m : tableMetadata) {
        if (m instanceof Delete) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnDeletesForBaseTable.add((Delete) m);
            }
        }
    }
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short numColsDeleted = 0;
        byte[] viewTenantId = viewInfo.getTenantId();
        byte[] viewSchemaName = viewInfo.getSchemaName();
        byte[] viewName = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
        // lock the rows corresponding to views so that no other thread can modify the view
        // meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        int numCols = view.getColumns().size();
        int minDroppedColOrdinalPos = Integer.MAX_VALUE;
        for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
            // ignore: the column family is not present in this view, so there is
            // nothing to drop
            } catch (ColumnNotFoundException e) {
            // ignore since it means the column is not present in the view
            }
            // if the column being dropped is referenced in the view's WHERE clause, disallow dropping
            // it
            if (existingViewColumn != null && view.getViewStatement() != null) {
                ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
                PhoenixConnection conn = null;
                try {
                    conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                } catch (ClassNotFoundException e) {
                    // should not happen on the server, where the Phoenix driver class is expected to be present
                }
                PhoenixStatement statement = new PhoenixStatement(conn);
                TableRef baseTableRef = new TableRef(basePhysicalTable);
                ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
                StatementContext context = new StatementContext(statement, columnResolver);
                Expression whereExpression = WhereCompiler.compile(context, viewWhere);
                Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
                ColumnFinder columnFinder = new ColumnFinder(colExpression);
                whereExpression.accept(columnFinder);
                if (columnFinder.getColumnFound()) {
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
                }
            }
            if (existingViewColumn != null) {
                // guard: the view may not contain the column at all (see the catches above)
                minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
                --numColsDeleted;
                if (ordinalPositionList.size() == 0) {
                    ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
                    for (PColumn col : view.getColumns()) {
                        ordinalPositionList.addColumn(getColumnKey(viewKey, col));
                    }
                }
                ordinalPositionList.dropColumn(columnKey);
                Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
                mutationsForAddingColumnsToViews.add(viewColumnDelete);
                // drop any view indexes that need this column
                dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
            }
        }
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
    }
    return null;
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete), PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection), ArrayList(java.util.ArrayList), ByteString(com.google.protobuf.ByteString), PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement), PTable(org.apache.phoenix.schema.PTable), StatementContext(org.apache.phoenix.compile.StatementContext), PColumn(org.apache.phoenix.schema.PColumn), ParseNode(org.apache.phoenix.parse.ParseNode), LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode), ColumnResolver(org.apache.phoenix.compile.ColumnResolver), RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock), PTinyint(org.apache.phoenix.schema.types.PTinyint), PSmallint(org.apache.phoenix.schema.types.PSmallint), ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException), ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException), SQLParser(org.apache.phoenix.parse.SQLParser), LiteralExpression(org.apache.phoenix.expression.LiteralExpression), RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression), KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression), Expression(org.apache.phoenix.expression.Expression), ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression), Mutation(org.apache.hadoop.hbase.client.Mutation), ColumnRef(org.apache.phoenix.schema.ColumnRef), TableRef(org.apache.phoenix.schema.TableRef)
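
The WHERE-clause guard in this example compiles the view's statement and then uses ColumnFinder, an expression visitor, to decide whether the column being dropped appears anywhere in the compiled expression tree. A simplified recursive stand-in for that check, purely illustrative (the real implementation is visitor-based):

static boolean referencesColumn(Expression node, Expression target) {
    // The compiled WHERE clause is a tree of Expression nodes; report whether
    // the target column expression occurs anywhere in it.
    if (node.equals(target)) {
        return true;
    }
    for (Expression child : node.getChildren()) {
        if (referencesColumn(child, target)) {
            return true;
        }
    }
    return false;
}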

Example 15 with RowLock

Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.

Class MetaDataEndpointImpl, method createSchema:

@Override
public void createSchema(RpcController controller, CreateSchemaRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    String schemaName = null;
    try {
        List<Mutation> schemaMutations = ProtobufUtil.getMutations(request);
        schemaName = request.getSchemaName();
        Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(schemaMutations);
        byte[] lockKey = m.getRow();
        Region region = env.getRegion();
        MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations);
        try {
            acquireLock(region, lockKey, locks);
            // Get as of latest timestamp so we can detect if we have a newer schema that already exists without
            // making an additional query
            ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(lockKey);
            PSchema schema = loadSchema(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp);
            if (schema != null) {
                if (schema.getTimeStamp() < clientTimeStamp) {
                    if (!isSchemaDeleted(schema)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_ALREADY_EXISTS);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.setSchema(PSchema.toProto(schema));
                        done.run(builder.build());
                        return;
                    }
                } else {
                    builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_SCHEMA_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    builder.setSchema(PSchema.toProto(schema));
                    done.run(builder.build());
                    return;
                }
            }
            region.mutateRowsWithLocks(schemaMutations, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            // Invalidate the cache - the next getSchema call will add it
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            if (cacheKey != null) {
                metaDataCache.invalidate(cacheKey);
            }
            // Get timeStamp from mutations - the above method sets it if
            // it's unset
            long currentTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations);
            builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_NOT_FOUND);
            builder.setMutationTime(currentTimeStamp);
            done.run(builder.build());
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("Creating the schema " + schemaName + " failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
    }
}
Also used: MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PSchema(org.apache.phoenix.parse.PSchema), ByteString(com.google.protobuf.ByteString), PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity), Region(org.apache.hadoop.hbase.regionserver.Region), Mutation(org.apache.hadoop.hbase.client.Mutation), RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)

Aggregations

RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 20 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 14 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 14 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 13 uses
ArrayList (java.util.ArrayList): 10 uses
ByteString (com.google.protobuf.ByteString): 9 uses
IOException (java.io.IOException): 9 uses
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 9 uses
Put (org.apache.hadoop.hbase.client.Put): 8 uses
PTable (org.apache.phoenix.schema.PTable): 7 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6 uses
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 6 uses
Cell (org.apache.hadoop.hbase.Cell): 5 uses
PSmallint (org.apache.phoenix.schema.types.PSmallint): 5 uses
PTinyint (org.apache.phoenix.schema.types.PTinyint): 5 uses
SQLException (java.sql.SQLException): 4 uses
List (java.util.List): 4 uses
ColumnFamilyNotFoundException (org.apache.phoenix.schema.ColumnFamilyNotFoundException): 4 uses
ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException): 4 uses
PColumn (org.apache.phoenix.schema.PColumn): 4 uses