Example 1 with MetaDataResponse

use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse in project phoenix by apache.

In the class MetaDataEndpointImpl, the method getFunctions:

@Override
public void getFunctions(RpcController controller, GetFunctionsRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[] tenantId = request.getTenantId().toByteArray();
    List<String> functionNames = new ArrayList<>(request.getFunctionNamesCount());
    try {
        Region region = env.getRegion();
        List<ByteString> functionNamesList = request.getFunctionNamesList();
        List<Long> functionTimestampsList = request.getFunctionTimestampsList();
        List<byte[]> keys = new ArrayList<byte[]>(request.getFunctionNamesCount());
        List<Pair<byte[], Long>> functions = new ArrayList<Pair<byte[], Long>>(request.getFunctionNamesCount());
        for (int i = 0; i < functionNamesList.size(); i++) {
            byte[] functionName = functionNamesList.get(i).toByteArray();
            functionNames.add(Bytes.toString(functionName));
            byte[] key = SchemaUtil.getFunctionKey(tenantId, functionName);
            MetaDataMutationResult result = checkFunctionKeyInRegion(key, region);
            if (result != null) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            functions.add(new Pair<byte[], Long>(functionName, functionTimestampsList.get(i)));
            keys.add(key);
        }
        long currentTime = EnvironmentEdgeManager.currentTimeMillis();
        List<PFunction> functionsAvailable = doGetFunctions(keys, request.getClientTimestamp());
        if (functionsAvailable == null) {
            builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND);
            builder.setMutationTime(currentTime);
            done.run(builder.build());
            return;
        }
        builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS);
        builder.setMutationTime(currentTime);
        for (PFunction function : functionsAvailable) {
            builder.addFunction(PFunction.toProto(function));
        }
        done.run(builder.build());
        return;
    } catch (Throwable t) {
        logger.error("getFunctions failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(functionNames.toString(), t));
    }
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) PFunction(org.apache.phoenix.parse.PFunction) ByteString(com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PLong(org.apache.phoenix.schema.types.PLong) Region(org.apache.hadoop.hbase.regionserver.Region) Pair(org.apache.hadoop.hbase.util.Pair)
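
Below is a hedged client-side sketch (not part of the Phoenix source shown here) of how the getFunctions endpoint above could be invoked, using the same coprocessorService/Batch.Call pattern that appears in Examples 2 through 5. The metaTable handle, function key and other inputs are placeholders, and the GetFunctionsRequest builder setters are assumed to mirror the getters the server code reads, since protobuf generates them in pairs.

public static MetaDataResponse getFunction(HTableInterface metaTable, final byte[] tenantIdBytes, byte[] functionKey, final String functionName, final long clientTimestamp) throws ServiceException, Throwable {
    // Route the call to the region holding the SYSTEM.FUNCTION row for this key.
    Map<byte[], MetaDataResponse> results = metaTable.coprocessorService(MetaDataService.class, functionKey, functionKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            GetFunctionsRequest.Builder builder = GetFunctionsRequest.newBuilder();
            builder.setTenantId(ByteString.copyFrom(tenantIdBytes));
            builder.addFunctionNames(ByteString.copyFromUtf8(functionName));
            // Placeholder for the client's cached function timestamp.
            builder.addFunctionTimestamps(0L);
            builder.setClientTimestamp(clientTimestamp);
            instance.getFunctions(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    // One response per region contacted; a single key maps to a single region.
    return results.values().iterator().next();
}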

Example 2 with MetaDataResponse

use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse in project phoenix by apache.

In the class IndexUtil, the method setIndexDisableTimeStamp:

public static MetaDataMutationResult setIndexDisableTimeStamp(String indexTableName, long minTimeStamp, HTableInterface metaTable, PIndexState newState) throws ServiceException, Throwable {
    byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
    // Mimic the Put that gets generated by the client on an update of the index state
    Put put = new Put(indexTableKey);
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes());
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(minTimeStamp));
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
    final List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
    final Map<byte[], MetaDataResponse> results = metaTable.coprocessorService(MetaDataService.class, indexTableKey, indexTableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder();
            for (Mutation m : tableMetadata) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            instance.updateIndexState(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    if (results.isEmpty()) {
        throw new IOException("Didn't get expected result size");
    }
    MetaDataResponse tmpResponse = results.values().iterator().next();
    return MetaDataMutationResult.constructFromProto(tmpResponse);
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) IOException(java.io.IOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) Put(org.apache.hadoop.hbase.client.Put) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) Mutation(org.apache.hadoop.hbase.client.Mutation)
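
A short, hedged usage sketch (not from the source above): a caller would typically inspect the MutationCode on the returned MetaDataMutationResult before assuming the index state row was updated. The index name and metaTable handle are placeholders.

MetaDataMutationResult result = IndexUtil.setIndexDisableTimeStamp("MY_SCHEMA.MY_INDEX",
        EnvironmentEdgeManager.currentTimeMillis(), metaTable, PIndexState.DISABLE);
MutationCode code = result.getMutationCode();
if (code == MutationCode.TABLE_NOT_FOUND) {
    // The SYSTEM.CATALOG row for the index was not found, so no state change happened.
    throw new IOException("Index MY_SCHEMA.MY_INDEX not found while updating its state: " + code);
}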

Example 3 with MetaDataResponse

use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method dropTable:

@Override
public MetaDataMutationResult dropTable(final List<Mutation> tableMetaData, final PTableType tableType, final boolean cascade) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantIdBytes, schemaBytes, tableBytes);
    final MetaDataMutationResult result = metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropTableRequest.Builder builder = DropTableRequest.newBuilder();
            for (Mutation m : tableMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setTableType(tableType.getSerializedValue());
            builder.setCascade(cascade);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropTable(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    final MutationCode code = result.getMutationCode();
    switch(code) {
        case TABLE_ALREADY_EXISTS:
            ReadOnlyProps props = this.getProps();
            boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
            PTable table = result.getTable();
            if (dropMetadata) {
                flushParentPhysicalTable(table);
                dropTables(result.getTableNamesToDelete());
            } else {
                invalidateTableStats(result.getTableNamesToDelete());
            }
            long timestamp = MetaDataUtil.getClientTimeStamp(tableMetaData);
            if (tableType == PTableType.TABLE) {
                byte[] physicalName = table.getPhysicalName().getBytes();
                ensureViewIndexTableDropped(physicalName, timestamp);
                ensureLocalIndexTableDropped(physicalName, timestamp);
                tableStatsCache.invalidateAll(table);
            }
            break;
        default:
            break;
    }
    return result;
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) PTable(org.apache.phoenix.schema.PTable) ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
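
As a small illustrative sketch (an assumption added for clarity, not taken from the source), the three-part SYSTEM.CATALOG row key that dropTable decodes above round-trips through SchemaUtil: getTableKey joins tenant id, schema name and table name, and getVarChars splits them back into the indexed slots used here. The schema and table names are placeholders.

byte[] tableKey = SchemaUtil.getTableKey(
        ByteUtil.EMPTY_BYTE_ARRAY,      // no tenant id
        Bytes.toBytes("MY_SCHEMA"),
        Bytes.toBytes("MY_TABLE"));
byte[][] rowKeyMetadata = new byte[3][];
SchemaUtil.getVarChars(tableKey, rowKeyMetadata);
// The slots line up with the PhoenixDatabaseMetaData.*_INDEX constants used in dropTable.
assert Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]).equals("MY_SCHEMA");
assert Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]).equals("MY_TABLE");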

Example 4 with MetaDataResponse

use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method metaDataCoprocessorExec:

/**
 * Invoke the meta data coprocessor, retrying once if the key was found not to be in the region
 * (due to a table split).
 */
private MetaDataMutationResult metaDataCoprocessorExec(byte[] tableKey, Batch.Call<MetaDataService, MetaDataResponse> callable, byte[] tableName) throws SQLException {
    try {
        boolean retried = false;
        while (true) {
            if (retried) {
                connection.relocateRegion(SchemaUtil.getPhysicalName(tableName, this.getProps()), tableKey);
            }
            HTableInterface ht = this.getTable(SchemaUtil.getPhysicalName(tableName, this.getProps()).getName());
            try {
                final Map<byte[], MetaDataResponse> results = ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable);
                assert (results.size() == 1);
                MetaDataResponse result = results.values().iterator().next();
                if (result.getReturnCode() == MetaDataProtos.MutationCode.TABLE_NOT_IN_REGION || result.getReturnCode() == MetaDataProtos.MutationCode.FUNCTION_NOT_IN_REGION) {
                    if (retried)
                        return MetaDataMutationResult.constructFromProto(result);
                    retried = true;
                    continue;
                }
                return MetaDataMutationResult.constructFromProto(result);
            } finally {
                Closeables.closeQuietly(ht);
            }
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } catch (Throwable t) {
        throw new SQLException(t);
    }
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) SQLException(java.sql.SQLException) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface)
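
Example 3 calls a two-argument metaDataCoprocessorExec(tableKey, callable), while the method above takes a third tableName argument. Presumably the two-argument form is a thin overload that defaults the physical table to SYSTEM.CATALOG; a sketch of such an overload (an assumption, not quoted from the source):

private MetaDataMutationResult metaDataCoprocessorExec(byte[] tableKey,
        Batch.Call<MetaDataService, MetaDataResponse> callable) throws SQLException {
    // Table metadata lives in SYSTEM.CATALOG; function metadata callers pass
    // SYSTEM.FUNCTION explicitly, as Example 5 does.
    return metaDataCoprocessorExec(tableKey, callable, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
}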

Example 5 with MetaDataResponse

use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method dropFunction:

@Override
public MetaDataMutationResult dropFunction(final List<Mutation> functionData, final boolean ifExists) throws SQLException {
    byte[][] rowKeyMetadata = new byte[2][];
    byte[] key = functionData.get(0).getRow();
    SchemaUtil.getVarChars(key, rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] functionBytes = rowKeyMetadata[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
    byte[] functionKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionBytes);
    final MetaDataMutationResult result = metaDataCoprocessorExec(functionKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropFunctionRequest.Builder builder = DropFunctionRequest.newBuilder();
            for (Mutation m : functionData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setIfExists(ifExists);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropFunction(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    }, PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES);
    return result;
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
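
For context, a hedged end-to-end sketch (not from the source): the dropFunction path above is what ultimately services a DROP FUNCTION statement issued through the Phoenix JDBC driver. The connection URL and function name are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class DropFunctionExample {
    public static void main(String[] args) throws SQLException {
        // Placeholder JDBC URL; IF EXISTS avoids an error when the UDF is absent.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute("DROP FUNCTION IF EXISTS MY_FUNC");
        }
    }
}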

Aggregations

MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) 21
Mutation (org.apache.hadoop.hbase.client.Mutation) 16
IOException (java.io.IOException) 14
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback) 12
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController) 12
MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) 12
KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder) 12
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch) 11
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) 11
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException) 10
NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) 9
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder) 9
Region (org.apache.hadoop.hbase.regionserver.Region) 8
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) 8
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock) 6
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 6
ByteString (com.google.protobuf.ByteString) 5
ArrayList (java.util.ArrayList) 5
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity) 5
PTable (org.apache.phoenix.schema.PTable) 5