Example 16 with BlockingRpcCallback

Use of org.apache.hadoop.hbase.ipc.BlockingRpcCallback in the Apache Phoenix project.

From the class ConnectionQueryServicesImpl, method createSchema.

@Override
public MetaDataMutationResult createSchema(final List<Mutation> schemaMutations, final String schemaName) throws SQLException {
    ensureNamespaceCreated(schemaName);
    Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(schemaMutations);
    byte[] key = m.getRow();
    MetaDataMutationResult result = metaDataCoprocessorExec(key, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            CreateSchemaRequest.Builder builder = CreateSchemaRequest.newBuilder();
            for (Mutation m : schemaMutations) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setSchemaName(schemaName);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.createSchema(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    return result;
}
Also used: MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder), NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder), IOException (java.io.IOException), PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException), ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController), MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto), MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch (org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback), Mutation (org.apache.hadoop.hbase.client.Mutation), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
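
The example above hands its Batch.Call to the metaDataCoprocessorExec helper rather than dispatching it itself. A rough, hypothetical sketch of what that dispatch amounts to is shown below; it assumes the MetaDataService endpoint is deployed on the SYSTEM.CATALOG table and that MetaDataMutationResult.constructFromProto handles the protobuf-to-result conversion (imports for Bytes, Map, and Closeables are assumed in addition to those listed above). The real helper in ConnectionQueryServicesImpl additionally handles retries and richer error translation.

// Hedged sketch only: the real metaDataCoprocessorExec wraps this core call with
// retries and fuller exception translation.
private static MetaDataMutationResult exec(ConnectionQueryServices services, byte[] key,
        Batch.Call<MetaDataService, MetaDataResponse> callable) throws SQLException {
    HTableInterface ht = null;
    try {
        // Assumption: the MetaDataService coprocessor endpoint lives on SYSTEM.CATALOG.
        ht = services.getTable(Bytes.toBytes("SYSTEM.CATALOG"));
        // Identical start and end key, so exactly one region (and one call) is involved.
        Map<byte[], MetaDataResponse> results =
                ht.coprocessorService(MetaDataService.class, key, key, callable);
        MetaDataResponse response = results.values().iterator().next();
        // Assumed conversion from the protobuf response to the client-side result object.
        return MetaDataMutationResult.constructFromProto(response);
    } catch (Throwable t) {
        throw new SQLException(t);
    } finally {
        if (ht != null) {
            Closeables.closeQuietly(ht);
        }
    }
}

Blocking on rpcCallback.get() inside the Batch.Call keeps each per-region invocation synchronous, which is why the same ServerRpcController plus BlockingRpcCallback pattern repeats in every example on this page.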

Example 17 with BlockingRpcCallback

Use of org.apache.hadoop.hbase.ipc.BlockingRpcCallback in the Apache Phoenix project.

From the class ServerCacheClient, method removeServerCache.

/**
 * Remove the cached table from all region servers
 * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable, Scan, Set)})
 * @param servers list of servers upon which table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
 * @throws SQLException
 * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
 */
private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    Throwable lastThrowable = null;
    TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
    final PTable cacheUsingTable = cacheUsingTableRef.getTable();
    byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
    HTableInterface iterateOverTable = services.getTable(tableName);
    try {
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /**
         * Allow for the possibility that the region we used to decide where to send our cache has split
         * and been relocated to another region server *after* we sent it, but before we removed it. To
         * accommodate this, we iterate through the current metadata boundaries and remove the cache once
         * for each server that we originally sent to.
         */
        if (LOG.isDebugEnabled()) {
            LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
        }
        for (HRegionLocation entry : locations) {
            if (remainingOnServers.contains(entry)) {
                // Call once per server
                try {
                    byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                    iterateOverTable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {

                        @Override
                        public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
                            ServerRpcController controller = new ServerRpcController();
                            BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback = new BlockingRpcCallback<RemoveServerCacheResponse>();
                            RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
                            final byte[] tenantIdBytes;
                            if (cacheUsingTable.isMultiTenant()) {
                                try {
                                    tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                } catch (SQLException e) {
                                    throw new IOException(e);
                                }
                            } else {
                                tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                            }
                            if (tenantIdBytes != null) {
                                builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                            }
                            builder.setCacheId(ByteStringer.wrap(cacheId));
                            instance.removeServerCache(controller, builder.build(), rpcCallback);
                            if (controller.getFailedOn() != null) {
                                throw controller.getFailedOn();
                            }
                            return rpcCallback.get();
                        }
                    });
                    remainingOnServers.remove(entry);
                } catch (Throwable t) {
                    lastThrowable = t;
                    LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), t);
                }
            }
        }
        if (!remainingOnServers.isEmpty()) {
            LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), lastThrowable);
        }
    } finally {
        Closeables.closeQuietly(iterateOverTable);
    }
}
Also used: RemoveServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheResponse), SQLException (java.sql.SQLException), ServerCachingService (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService), IOException (java.io.IOException), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController), PTable (org.apache.phoenix.schema.PTable), HRegionLocation (org.apache.hadoop.hbase.HRegionLocation), Batch (org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), TableRef (org.apache.phoenix.schema.TableRef), HashSet (java.util.HashSet)
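
The getKeyInRegion helper called above is not shown in this excerpt. All it needs to do is return a row key that coprocessorService will route to the given region; a minimal sketch, assuming a single zero byte stands in for the first region's empty start key, might look like this:

// Hypothetical sketch of getKeyInRegion: produce a row key guaranteed to fall in the
// region that starts at regionStartKey, so the endpoint call is routed to that region.
private static final byte[] KEY_IN_FIRST_REGION = new byte[] { 0 };

private static byte[] getKeyInRegion(byte[] regionStartKey) {
    if (regionStartKey.length == 0) {
        // The table's first region has an empty start key; substitute a minimal non-empty key.
        return KEY_IN_FIRST_REGION;
    }
    return regionStartKey;
}

Note also that the example removes an entry from remainingOnServers only after a successful endpoint call, which is what lets the warning at the end report exactly which servers may still hold the cache.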

Aggregations

BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback): 17 usages
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController): 17 usages
IOException (java.io.IOException): 16 usages
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch): 15 usages
MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService): 15 usages
KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder): 13 usages
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 12 usages
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 12 usages
NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder): 12 usages
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder): 12 usages
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 11 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 10 usages
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 10 usages
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 8 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 6 usages
SQLException (java.sql.SQLException): 5 usages
HashMap (java.util.HashMap): 4 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 3 usages
Map (java.util.Map): 3 usages