Search in sources :

Example 6 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in the Apache Phoenix project.

Class MetaDataEndpointImpl, method updateIndexState.

/**
 * Endpoint RPC that transitions an index's state (INDEX_STATE column) in the
 * SYSTEM.CATALOG row identified by the tenant/schema/table key extracted from
 * the request mutations. Validates the requested transition against the current
 * state, adjusts INDEX_DISABLE_TIMESTAMP handling when one is supplied, applies
 * the mutations under a row lock, and invalidates the server-side metadata
 * cache so clients observe the change.
 *
 * The response is delivered through {@code done}; failures are reported via the
 * controller rather than thrown to the caller.
 */
@Override
public void updateIndexState(RpcController controller, UpdateIndexStateRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        // Extract tenant/schema/table from the row key of the supplied mutations.
        byte[][] rowKeyMetaData = new byte[3][];
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
        Region region = env.getRegion();
        // A non-null result means the key is not served by this region; return it to the client.
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        long timeStamp = HConstants.LATEST_TIMESTAMP;
        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
        // Locate the INDEX_STATE cell and (optional) INDEX_DISABLE_TIMESTAMP cell
        // within the first mutation's TABLE_FAMILY cells.
        List<Cell> newKVs = tableMetadata.get(0).getFamilyCellMap().get(TABLE_FAMILY_BYTES);
        Cell newKV = null;
        int disableTimeStampKVIndex = -1;
        int indexStateKVIndex = 0;
        int index = 0;
        for (Cell cell : newKVs) {
            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0) {
                newKV = cell;
                indexStateKVIndex = index;
                timeStamp = cell.getTimestamp();
            } else if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_DISABLE_TIMESTAMP_BYTES, 0, INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0) {
                disableTimeStampKVIndex = index;
            }
            index++;
        }
        // NOTE(review): if the request carries no INDEX_STATE cell, newKV stays null and the
        // next line throws NPE; callers presumably always include one — confirm.
        PIndexState newState = PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
        RowLock rowLock = acquireLock(region, key, null);
        if (rowLock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
        try {
            // Read the current catalog columns for this index row while holding the lock.
            Get get = new Get(key);
            get.addColumn(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
            Result currentResult = region.get(get);
            if (currentResult.rawCells().length == 0) {
                // Row does not exist: report TABLE_NOT_FOUND.
                builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
                builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                done.run(builder.build());
                return;
            }
            Cell dataTableKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            Cell currentStateKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            Cell currentDisableTimeStamp = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            boolean rowKeyOrderOptimizable = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES) != null;
            // check permission on data table
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            PTable loadedTable = getTable(env, key, new ImmutableBytesPtr(key), clientTimeStamp, clientTimeStamp, request.getClientVersion());
            if (loadedTable == null) {
                builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
                builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                done.run(builder.build());
                return;
            }
            // Give the coprocessor host a chance to veto/observe the index update (ACLs etc.).
            getCoprocessorHost().preIndexUpdate(Bytes.toString(tenantId), SchemaUtil.getTableName(schemaName, tableName), TableName.valueOf(loadedTable.getPhysicalName().getBytes()), getParentPhysicalTableName(loadedTable), newState);
            PIndexState currentState = PIndexState.fromSerializedValue(currentStateKV.getValueArray()[currentStateKV.getValueOffset()]);
            // Timestamp of INDEX_STATE gets updated with each call
            long actualTimestamp = currentStateKV.getTimestamp();
            long curTimeStampVal = 0;
            long newDisableTimeStamp = 0;
            if ((currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0)) {
                curTimeStampVal = (Long) PLong.INSTANCE.toObject(currentDisableTimeStamp.getValueArray(), currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength());
                // new DisableTimeStamp is passed in
                if (disableTimeStampKVIndex >= 0) {
                    Cell newDisableTimeStampCell = newKVs.get(disableTimeStampKVIndex);
                    long expectedTimestamp = newDisableTimeStampCell.getTimestamp();
                    // Reject if the row moved since the client read it, i.e. an
                    // index write failed before the rebuild was complete.
                    if (actualTimestamp > expectedTimestamp) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    newDisableTimeStamp = (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(), newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength());
                    // index in which case the state will be INACTIVE or PENDING_ACTIVE.
                    if (curTimeStampVal != 0 && (newState == PIndexState.DISABLE || newState == PIndexState.PENDING_ACTIVE || newState == PIndexState.PENDING_DISABLE) && Math.abs(curTimeStampVal) < Math.abs(newDisableTimeStamp)) {
                        // do not reset disable timestamp as we want to keep the min
                        newKVs.remove(disableTimeStampKVIndex);
                        disableTimeStampKVIndex = -1;
                    }
                }
            }
            // Detect invalid transitions
            if (currentState == PIndexState.BUILDING) {
                if (newState == PIndexState.USABLE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
            } else if (currentState == PIndexState.DISABLE) {
                // Can't transition back to INACTIVE if INDEX_DISABLE_TIMESTAMP is 0
                if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE && (newState != PIndexState.INACTIVE || curTimeStampVal == 0)) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                // Done building, but was disabled before that, so stay in the DISABLE state
                if (newState == PIndexState.ACTIVE) {
                    newState = PIndexState.DISABLE;
                }
                // Can't transition from DISABLE to PENDING_DISABLE
                if (newState == PIndexState.PENDING_DISABLE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
            }
            // Keep the existing INDEX_STATE timestamp while still building (unless going ACTIVE).
            if (currentState == PIndexState.BUILDING && newState != PIndexState.ACTIVE) {
                timeStamp = currentStateKV.getTimestamp();
            }
            if ((currentState == PIndexState.ACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.UNUSABLE) {
                // UNUSABLE from an active state maps to INACTIVE; rewrite the state cell.
                newState = PIndexState.INACTIVE;
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            } else if ((currentState == PIndexState.INACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.USABLE) {
                // Don't allow manual state change to USABLE (i.e. ACTIVE) if non zero INDEX_DISABLE_TIMESTAMP
                if (curTimeStampVal != 0) {
                    newState = currentState;
                } else {
                    newState = PIndexState.ACTIVE;
                }
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            }
            PTable returnTable = null;
            // Only write if something actually changes (state or disable timestamp).
            if (currentState != newState || disableTimeStampKVIndex != -1) {
                // make a copy of tableMetadata so we can add to it
                tableMetadata = new ArrayList<Mutation>(tableMetadata);
                // Always include the empty column value at latest timestamp so
                // that clients pull over the update.
                Put emptyValue = new Put(key);
                emptyValue.addColumn(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
                tableMetadata.add(emptyValue);
                byte[] dataTableKey = null;
                if (dataTableKV != null) {
                    dataTableKey = SchemaUtil.getTableKey(tenantId, schemaName, CellUtil.cloneValue(dataTableKV));
                    // insert an empty KV to trigger time stamp update on data table row
                    Put p = new Put(dataTableKey);
                    p.addColumn(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
                    tableMetadata.add(p);
                }
                boolean setRowKeyOrderOptimizableCell = newState == PIndexState.BUILDING && !rowKeyOrderOptimizable;
                // so that the row keys get generated using the new row key format
                if (setRowKeyOrderOptimizableCell) {
                    UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, key, timeStamp);
                }
                mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
                // Invalidate from cache
                Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                metaDataCache.invalidate(cacheKey);
                if (dataTableKey != null) {
                    metaDataCache.invalidate(new ImmutableBytesPtr(dataTableKey));
                }
                // Re-read the table when the change affects what clients must see immediately.
                if (setRowKeyOrderOptimizableCell || disableTimeStampKVIndex != -1 || currentState == PIndexState.DISABLE || newState == PIndexState.BUILDING) {
                    returnTable = doGetTable(key, HConstants.LATEST_TIMESTAMP, rowLock, request.getClientVersion());
                }
            }
            // Get client timeStamp from mutations, since it may get updated by the
            // mutateRowsWithLocks call
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
            builder.setMutationTime(currentTime);
            if (returnTable != null) {
                builder.setTable(PTableImpl.toProto(returnTable));
            }
            done.run(builder.build());
            return;
        } finally {
            rowLock.release();
        }
    } catch (Throwable t) {
        logger.error("updateIndexState failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used : PTable(org.apache.phoenix.schema.PTable) Result(org.apache.hadoop.hbase.client.Result) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Cell(org.apache.hadoop.hbase.Cell) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) PIndexState(org.apache.phoenix.schema.PIndexState) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) Put(org.apache.hadoop.hbase.client.Put) Get(org.apache.hadoop.hbase.client.Get) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation)

Example 7 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in the Apache Phoenix project.

Class MetaDataEndpointImpl, method dropTable.

/**
 * Endpoint RPC that drops a table (or view/index, per the request's table type)
 * from SYSTEM.CATALOG. System tables are never droppable. Locks the parent
 * table's row first (or the table's own row when there is no parent), delegates
 * the actual deletion to {@code doDropTable}, commits the resulting mutations,
 * and marks all affected cache entries as deleted so clients refresh.
 *
 * The response is delivered through {@code done}; failures are reported via the
 * controller rather than thrown to the caller.
 */
@Override
public void dropTable(RpcController controller, DropTableRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    boolean isCascade = request.getCascade();
    byte[][] rowKeyMetaData = new byte[3][];
    String tableType = request.getTableType();
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        // Extract tenant/schema/table from the row key of the supplied mutations.
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        // Disallow deletion of a system table
        if (tableType.equals(PTableType.SYSTEM.getSerializedValue())) {
            builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
            builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
            done.run(builder.build());
            return;
        }
        List<byte[]> tableNamesToDelete = Lists.newArrayList();
        List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
        // Lock on the parent's row when dropping an index/child; otherwise on our own row.
        byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
        byte[] lockTableName = parentTableName == null ? tableName : parentTableName;
        byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, lockTableName);
        byte[] key = parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
        Region region = env.getRegion();
        // A non-null result means the key is not served by this region; return it to the client.
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        PTableType ptableType = PTableType.fromSerializedValue(tableType);
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
        byte[] cKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
        PTable loadedTable = getTable(env, cKey, new ImmutableBytesPtr(cKey), clientTimeStamp, clientTimeStamp, request.getClientVersion());
        if (loadedTable == null) {
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
            builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
            done.run(builder.build());
            return;
        }
        // Give the coprocessor host a chance to veto/observe the drop (ACLs etc.).
        getCoprocessorHost().preDropTable(Bytes.toString(tenantIdBytes), SchemaUtil.getTableName(schemaName, tableName), TableName.valueOf(loadedTable.getPhysicalName().getBytes()), getParentPhysicalTableName(loadedTable), ptableType, loadedTable.getIndexes());
        List<RowLock> locks = Lists.newArrayList();
        try {
            // Parent row first, then the table's own row (when they differ).
            acquireLock(region, lockKey, locks);
            if (key != lockKey) {
                acquireLock(region, key, locks);
            }
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            result = doDropTable(key, tenantIdBytes, schemaName, tableName, parentTableName, PTableType.fromSerializedValue(tableType), tableMetadata, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, isCascade, request.getClientVersion());
            if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            // Commit the list of deletions.
            mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            // Replace each dropped entry with a deleted-table marker so stale lookups
            // observe the deletion instead of a cache miss.
            for (ImmutableBytesPtr ckey : invalidateList) {
                metaDataCache.put(ckey, newDeletedTableMarker(currentTime));
            }
            if (parentTableName != null) {
                ImmutableBytesPtr parentCacheKey = new ImmutableBytesPtr(lockKey);
                metaDataCache.invalidate(parentCacheKey);
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            releaseRowLocks(region, locks);
        }
    } catch (Throwable t) {
        logger.error("dropTable failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) PTableType(org.apache.phoenix.schema.PTableType) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) ByteString(com.google.protobuf.ByteString) PTable(org.apache.phoenix.schema.PTable) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)

Example 8 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in the Apache Phoenix project.

Class MetaDataEndpointImpl, method doGetFunctions.

/**
 * Resolves the {@link PFunction} metadata for each of the given row keys,
 * preferring the server-side metadata cache and falling back to a region scan
 * ({@code buildFunctions}) for the cache misses.
 *
 * NOTE: {@code keys} is sorted in place and cache-resolved keys are removed
 * from it before the fallback scan.
 *
 * @param keys function row keys to resolve; mutated by this method
 * @param clientTimeStamp cached entries are trusted only if strictly older than this
 * @return one function per requested key, or {@code null} if any function is
 *         missing or marked deleted as of {@code clientTimeStamp}
 * @throws IOException on region access failure
 * @throws SQLException on metadata deserialization failure
 */
private List<PFunction> doGetFunctions(List<byte[]> keys, long clientTimeStamp) throws IOException, SQLException {
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    Region region = env.getRegion();
    // Sort keys so row locks are always acquired in a consistent order.
    // Bytes.BYTES_COMPARATOR replaces the hand-rolled anonymous comparator.
    Collections.sort(keys, Bytes.BYTES_COMPARATOR);
    /*
     * Lock directly on key, though it may be an index table. This will just prevent a table
     * from getting rebuilt too often.
     */
    List<RowLock> rowLocks = new ArrayList<RowLock>(keys.size());
    try {
        for (int i = 0; i < keys.size(); i++) {
            acquireLock(region, keys.get(i), rowLocks);
        }
        List<PFunction> functionsAvailable = new ArrayList<PFunction>(keys.size());
        int numFunctions = keys.size();
        Iterator<byte[]> iterator = keys.iterator();
        while (iterator.hasNext()) {
            byte[] key = iterator.next();
            PFunction function = (PFunction) metaDataCache.getIfPresent(new FunctionBytesPtr(key));
            // Only trust cached entries strictly older than the client timestamp.
            if (function != null && function.getTimeStamp() < clientTimeStamp) {
                if (isFunctionDeleted(function)) {
                    return null;
                }
                functionsAvailable.add(function);
                // Remove so the fallback scan below only sees the cache misses.
                iterator.remove();
            }
        }
        if (functionsAvailable.size() == numFunctions)
            return functionsAvailable;
        // Query for the latest table first, since it's not cached
        List<PFunction> buildFunctions = buildFunctions(keys, region, clientTimeStamp, false, Collections.<Mutation>emptyList());
        if (buildFunctions == null || buildFunctions.isEmpty()) {
            return null;
        }
        functionsAvailable.addAll(buildFunctions);
        if (functionsAvailable.size() == numFunctions)
            return functionsAvailable;
        // Some keys could not be resolved at all.
        return null;
    } finally {
        releaseRowLocks(region, rowLocks);
    }
}
Also used : PFunction(org.apache.phoenix.parse.PFunction) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Region(org.apache.hadoop.hbase.regionserver.Region) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) FunctionBytesPtr(org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr)

Example 9 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in the Apache Phoenix project.

Class MetaDataEndpointImpl, method dropFunction.

/**
 * Endpoint RPC that drops a function from SYSTEM.FUNCTION. Acquires the row
 * lock for the function key, delegates the deletion to {@code doDropFunction},
 * commits the mutations, and replaces affected cache entries with
 * deleted-function markers so stale lookups observe the deletion.
 *
 * The response is delivered through {@code done}; failures are reported via the
 * controller rather than thrown to the caller.
 */
@Override
public void dropFunction(RpcController controller, DropFunctionRequest request, RpcCallback<MetaDataResponse> done) {
    byte[][] rowKeyMetaData = new byte[2][];
    byte[] functionName = null;
    try {
        // Extract tenant and function name from the row key of the supplied mutations.
        List<Mutation> functionMetaData = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
        byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName);
        Region region = env.getRegion();
        // A non-null result means the key is not served by this region; return it to the client.
        MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData);
        try {
            acquireLock(region, lockKey, locks);
            List<byte[]> keys = new ArrayList<byte[]>(1);
            keys.add(lockKey);
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            result = doDropFunction(clientTimeStamp, keys, functionMetaData, invalidateList);
            if (result.getMutationCode() != MutationCode.FUNCTION_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            // Commit the deletion mutations under the held row lock.
            mutateRowsWithLocks(region, functionMetaData, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            long currentTime = MetaDataUtil.getClientTimeStamp(functionMetaData);
            // Replace each entry with a deleted-function marker so stale lookups
            // observe the deletion instead of a cache miss.
            for (ImmutableBytesPtr ptr : invalidateList) {
                metaDataCache.invalidate(ptr);
                metaDataCache.put(ptr, newDeletedFunctionMarker(currentTime));
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            releaseRowLocks(region, locks);
        }
    } catch (Throwable t) {
        logger.error("dropFunction failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(Bytes.toString(functionName), t));
    }
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)

Example 10 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in the Apache Phoenix project.

Class MetaDataEndpointImpl, method mutateColumn.

/**
 * Shared implementation for column add/drop DDL. Loads the target table
 * (cache first, then a region scan), validates that the client's view of the
 * table is current (timestamp and sequence number), applies the
 * {@link ColumnMutator}'s changes, commits the mutations under the row lock,
 * and invalidates all affected metadata cache entries.
 *
 * @param tableMetadata mutations from the client describing the column change
 * @param mutator strategy that applies the actual column add/drop to the row
 * @param clientVersion client protocol version, forwarded to table building
 * @return the mutation result; never null on the normal path (the trailing
 *         {@code return null} is unreachable because the catch always throws)
 * @throws IOException wrapping any underlying failure
 */
private MetaDataMutationResult mutateColumn(List<Mutation> tableMetadata, ColumnMutator mutator, int clientVersion) throws IOException {
    // Extract tenant/schema/table from the row key of the supplied mutations.
    byte[][] rowKeyMetaData = new byte[5][];
    MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
    byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    try {
        Region region = env.getRegion();
        // A non-null result means the key is not served by this region.
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            return result;
        }
        List<RowLock> locks = Lists.newArrayList();
        try {
            acquireLock(region, key, locks);
            ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            invalidateList.add(cacheKey);
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
            if (logger.isDebugEnabled()) {
                if (table == null) {
                    logger.debug("Table " + Bytes.toStringBinary(key) + " not found in cache. Will build through scan");
                } else {
                    logger.debug("Table " + Bytes.toStringBinary(key) + " found in cache with timestamp " + table.getTimeStamp() + " seqNum " + table.getSequenceNumber());
                }
            }
            // Get client timeStamp from mutations
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            // Cache miss: build the table from the catalog rows via scan.
            if (table == null && (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion)) == null) {
                // if not found then call newerTableExists and add delete marker for timestamp
                // found
                table = buildDeletedTable(key, cacheKey, region, clientTimeStamp);
                if (table != null) {
                    logger.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
                    return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
                return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
            }
            // The client's view is stale if the table changed at/after its timestamp.
            if (table.getTimeStamp() >= clientTimeStamp) {
                logger.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
                return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table);
            } else if (isTableDeleted(table)) {
                return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
            }
            // lookup TABLE_SEQ_NUM in
            long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1;
            if (logger.isDebugEnabled()) {
                logger.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum " + expectedSeqNum + " and found seqNum " + table.getSequenceNumber() + " with " + table.getColumns().size() + " columns: " + table.getColumns());
            }
            // Sequence-number mismatch means another client mutated the table concurrently.
            if (expectedSeqNum != table.getSequenceNumber()) {
                if (logger.isDebugEnabled()) {
                    logger.debug("For table " + Bytes.toStringBinary(key) + " returning CONCURRENT_TABLE_MUTATION due to unexpected seqNum");
                }
                return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table);
            }
            PTableType type = table.getType();
            if (type == PTableType.INDEX) {
                // Disallow mutation of an index table
                return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
            } else {
                // server-side, except for indexing, we always expect the keyvalues to be standard KeyValues
                PTableType expectedType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
                // We said to drop a table, but found a view or visa versa
                if (type != expectedType) {
                    return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
            }
            result = mutator.updateMutation(table, rowKeyMetaData, tableMetadata, region, invalidateList, locks, clientTimeStamp);
            // if the update mutation caused tables to be deleted, the mutation code returned will be MutationCode.TABLE_ALREADY_EXISTS
            if (result != null && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                return result;
            }
            mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            // Invalidate from cache
            for (ImmutableBytesPtr invalidateKey : invalidateList) {
                metaDataCache.invalidate(invalidateKey);
            }
            // Get client timeStamp from mutations, since it may get updated by the
            // mutateRowsWithLocks call
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            // if the update mutation caused tables to be deleted just return the result which will contain the table to be deleted
            if (result != null) {
                return result;
            } else {
                // Rebuild from the freshly committed catalog rows for the response.
                table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion);
                return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table);
            }
        } finally {
            releaseRowLocks(region, locks);
        }
    } catch (Throwable t) {
        ServerUtil.throwIOException(SchemaUtil.getTableName(schemaName, tableName), t);
        // impossible
        return null;
    }
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTableType(org.apache.phoenix.schema.PTableType) ArrayList(java.util.ArrayList) PTable(org.apache.phoenix.schema.PTable) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Region(org.apache.hadoop.hbase.regionserver.Region) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)27 PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity)27 Region (org.apache.hadoop.hbase.regionserver.Region)18 PTable (org.apache.phoenix.schema.PTable)14 ArrayList (java.util.ArrayList)11 RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)10 Scan (org.apache.hadoop.hbase.client.Scan)9 RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner)9 Cell (org.apache.hadoop.hbase.Cell)7 Mutation (org.apache.hadoop.hbase.client.Mutation)7 PSmallint (org.apache.phoenix.schema.types.PSmallint)7 PTinyint (org.apache.phoenix.schema.types.PTinyint)7 MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse)5 PFunction (org.apache.phoenix.parse.PFunction)5 ByteString (com.google.protobuf.ByteString)4 PSchema (org.apache.phoenix.parse.PSchema)4 FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter)3 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)3 FunctionBytesPtr (org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr)3 PName (org.apache.phoenix.schema.PName)3