
Example 1 with AddServerCacheResponse

Use of org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse in project phoenix by apache.

From the class ServerCacheClient, method addServerCache:

public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /*
     * Execute EndPoint in parallel on each server to send compressed hash cache.
     */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        final PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
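                                    // For multi-tenant tables, the raw tenant ID must be normalized to the
                                    // row key schema (accounting for salting and view index columns).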
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    builder.setHasProtoBufIndexMaintainer(true);
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                    svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                    builder.setTxState(ByteStringer.wrap(txState));
                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                        throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                }
                            });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
                }
            }
        }
        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}
Also used:
SQLException (java.sql.SQLException)
SQLCloseable (org.apache.phoenix.util.SQLCloseable)
Closeable (java.io.Closeable)
ArrayList (java.util.ArrayList)
JobCallable (org.apache.phoenix.job.JobManager.JobCallable)
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController)
PTable (org.apache.phoenix.schema.PTable)
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation)
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback)
HashSet (java.util.HashSet)
ServerCachingService (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService)
MemoryChunk (org.apache.phoenix.memory.MemoryManager.MemoryChunk)
AddServerCacheRequest (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest)
IOException (java.io.IOException)
ExecutorService (java.util.concurrent.ExecutorService)
Future (java.util.concurrent.Future)
ServerCacheFactory (org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory)
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
AddServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)
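For orientation, here is a hedged sketch of how this overload is typically driven from the client side. The setup values (connection, ranges, serializedCache, txState, cacheUsingTableRef) are assumptions standing in for state that Phoenix's HashCacheClient produces while compiling a hash join:

// A sketch, not Phoenix's actual call site: `ranges`, `serializedCache`,
// `txState`, and `cacheUsingTableRef` are hypothetical locals.
ServerCacheClient cacheClient = new ServerCacheClient(connection);
ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(serializedCache);
ServerCacheClient.ServerCache cache = null;
try {
    cache = cacheClient.addServerCache(ranges, cachePtr, txState,
            new HashCacheFactory(), cacheUsingTableRef);
    // ... run the join query that references the cache by its generated id ...
} finally {
    if (cache != null) {
        // Removes the cache from every region server it was sent to.
        cache.close();
    }
}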

Example 2 with AddServerCacheResponse

Use of org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse in project phoenix by apache.

From the class ServerCacheClient, method addServerCache:

public boolean addServerCache(HTableInterface htable, byte[] key, final PTable cacheUsingTable, final byte[] cacheId, final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final byte[] txState) throws Exception {
    byte[] keyInRegion = getKeyInRegion(key);
    final Map<byte[], AddServerCacheResponse> results;
    try {
        results = htable.coprocessorService(ServerCachingService.class, keyInRegion, keyInRegion, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

            @Override
            public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                final byte[] tenantIdBytes;
                if (cacheUsingTable.isMultiTenant()) {
                    try {
                        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                    } catch (SQLException e) {
                        throw new IOException(e);
                    }
                } else {
                    tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                }
                if (tenantIdBytes != null) {
                    builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                }
                builder.setCacheId(ByteStringer.wrap(cacheId));
                builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                builder.setHasProtoBufIndexMaintainer(true);
                ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
                builder.setCacheFactory(svrCacheFactoryBuilder.build());
                builder.setTxState(ByteStringer.wrap(txState));
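                // Advertise the client version; the server falls back to
                // IndexMetaDataCache.UNKNOWN_CLIENT_VERSION when it is absent (see Example 3).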
                builder.setClientVersion(MetaDataProtocol.PHOENIX_VERSION);
                instance.addServerCache(controller, builder.build(), rpcCallback);
                if (controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get();
            }
        });
    } catch (Throwable t) {
        throw new Exception(t);
    }
    if (results != null && results.size() == 1) {
        return results.values().iterator().next().getReturn();
    }
    return false;
}
Also used:
SQLException (java.sql.SQLException)
ServerCachingService (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService)
AddServerCacheRequest (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest)
IOException (java.io.IOException)
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController)
InsufficientMemoryException (org.apache.phoenix.memory.InsufficientMemoryException)
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback)
ServerCacheFactory (org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory)
AddServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)
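Because this overload takes an explicit table handle and row key, it lends itself to re-sending a cache to a single server, for example after a region has moved. A hedged sketch; `services`, `cacheClient`, `table`, `regionLocation`, `cacheId`, `cachePtr`, and `txState` are assumed to exist in scope:

// Hypothetical re-send of an existing cache to the server now hosting a region.
HTableInterface htable = services.getTable(table.getPhysicalName().getBytes());
try {
    // Target the server currently hosting the region's start key.
    boolean added = cacheClient.addServerCache(htable,
            regionLocation.getRegionInfo().getStartKey(), table, cacheId,
            cachePtr, new HashCacheFactory(), txState);
    if (!added) {
        throw new SQLException("Could not send cache to region " + regionLocation);
    }
} finally {
    htable.close();
}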

Example 3 with AddServerCacheResponse

Use of org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse in project phoenix by apache.

From the class ServerCachingEndpointImpl, method addServerCache:

@Override
public void addServerCache(RpcController controller, AddServerCacheRequest request, RpcCallback<AddServerCacheResponse> done) {
    ImmutableBytesPtr tenantId = null;
    if (request.hasTenantId()) {
        tenantId = new ImmutableBytesPtr(request.getTenantId().toByteArray());
    }
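    // Resolve the cache partition for this tenant; tenantId is null when the
    // request carries no tenant ID.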
    TenantCache tenantCache = GlobalCache.getTenantCache(this.env, tenantId);
    ImmutableBytesWritable cachePtr = org.apache.phoenix.protobuf.ProtobufUtil.toImmutableBytesWritable(request.getCachePtr());
    byte[] txState = request.hasTxState() ? request.getTxState().toByteArray() : ByteUtil.EMPTY_BYTE_ARRAY;
    try {
        @SuppressWarnings("unchecked") Class<ServerCacheFactory> serverCacheFactoryClass = (Class<ServerCacheFactory>) Class.forName(request.getCacheFactory().getClassName());
        ServerCacheFactory cacheFactory = serverCacheFactoryClass.newInstance();
        tenantCache.addServerCache(new ImmutableBytesPtr(request.getCacheId().toByteArray()), cachePtr, txState, cacheFactory, request.hasHasProtoBufIndexMaintainer() && request.getHasProtoBufIndexMaintainer(), request.hasClientVersion() ? request.getClientVersion() : IndexMetaDataCache.UNKNOWN_CLIENT_VERSION);
    } catch (Throwable e) {
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException("Error when adding cache: ", e));
    }
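    // The response is completed unconditionally; failures travel back through
    // the controller's exception rather than the response payload.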
    AddServerCacheResponse.Builder responseBuilder = AddServerCacheResponse.newBuilder();
    responseBuilder.setReturn(true);
    AddServerCacheResponse result = responseBuilder.build();
    done.run(result);
}
Also used:
TenantCache (org.apache.phoenix.cache.TenantCache)
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)
ServerCacheFactory (org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory)
AddServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)
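The endpoint only answers RPCs on tables where the coprocessor is loaded. Phoenix attaches it automatically when creating its tables (in ConnectionQueryServicesImpl), so this explicit HBase 1.x registration is purely illustrative:

// Illustrative only; in practice Phoenix wires this coprocessor up itself.
HTableDescriptor descriptor = admin.getTableDescriptor(TableName.valueOf("MY_TABLE"));
if (!descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) {
    descriptor.addCoprocessor(ServerCachingEndpointImpl.class.getName());
    admin.modifyTable(descriptor.getTableName(), descriptor);
}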

Aggregations

ServerCacheFactory (org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory): 3
AddServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse): 3
IOException (java.io.IOException): 2
SQLException (java.sql.SQLException): 2
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback): 2
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController): 2
AddServerCacheRequest (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest): 2
ServerCachingService (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService): 2
Closeable (java.io.Closeable): 1
ArrayList (java.util.ArrayList): 1
HashSet (java.util.HashSet): 1
ExecutorService (java.util.concurrent.ExecutorService): 1
Future (java.util.concurrent.Future): 1
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 1
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 1
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 1
TenantCache (org.apache.phoenix.cache.TenantCache): 1
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 1
JobCallable (org.apache.phoenix.job.JobManager.JobCallable): 1
InsufficientMemoryException (org.apache.phoenix.memory.InsufficientMemoryException): 1