Use of org.apache.hadoop.hbase.ipc.ServerRpcController in project phoenix by apache.
From the class ServerCacheClient, method removeServerCache.
/**
 * Remove the cached table from all region servers
 * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable, Scan, Set)})
 * @param servers list of servers upon which the table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
 * @throws SQLException
 * @throws IllegalStateException if the hashed table cannot be removed on any region server on which it was added
 */
private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    Throwable lastThrowable = null;
    TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
    final PTable cacheUsingTable = cacheUsingTableRef.getTable();
    byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
    HTableInterface iterateOverTable = services.getTable(tableName);
    try {
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /**
         * Allow for the possibility that the region we based where to send our cache has split and been
         * relocated to another region server *after* we sent it, but before we removed it. To accommodate
         * this, we iterate through the current metadata boundaries and remove the cache once for each
         * server that we originally sent to.
         */
        if (LOG.isDebugEnabled()) {
            LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
        }
        for (HRegionLocation entry : locations) {
            if (remainingOnServers.contains(entry)) {
                // Call once per server
                try {
                    byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                    iterateOverTable.coprocessorService(ServerCachingService.class, key, key,
                            new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
                        @Override
                        public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
                            ServerRpcController controller = new ServerRpcController();
                            BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback =
                                    new BlockingRpcCallback<RemoveServerCacheResponse>();
                            RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
                            final byte[] tenantIdBytes;
                            if (cacheUsingTable.isMultiTenant()) {
                                try {
                                    tenantIdBytes = connection.getTenantId() == null ? null
                                            : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
                                                    cacheUsingTable.getBucketNum() != null,
                                                    connection.getTenantId(),
                                                    cacheUsingTable.getViewIndexId() != null);
                                } catch (SQLException e) {
                                    throw new IOException(e);
                                }
                            } else {
                                tenantIdBytes = connection.getTenantId() == null ? null
                                        : connection.getTenantId().getBytes();
                            }
                            if (tenantIdBytes != null) {
                                builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                            }
                            builder.setCacheId(ByteStringer.wrap(cacheId));
                            instance.removeServerCache(controller, builder.build(), rpcCallback);
                            if (controller.getFailedOn() != null) {
                                throw controller.getFailedOn();
                            }
                            return rpcCallback.get();
                        }
                    });
                    remainingOnServers.remove(entry);
                } catch (Throwable t) {
                    lastThrowable = t;
                    LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), t);
                }
            }
        }
        if (!remainingOnServers.isEmpty()) {
            LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), lastThrowable);
        }
    } finally {
        Closeables.closeQuietly(iterateOverTable);
    }
}
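The core of this method is the HBase 1.x coprocessor-endpoint calling convention: create one ServerRpcController and one BlockingRpcCallback per invocation, call the generated protobuf stub, and then check controller.getFailedOn() so that any exception raised inside the endpoint is re-thrown to the client. Below is a minimal sketch of that convention with the Phoenix-specific tenant-id handling stripped out. It is illustrative, not a drop-in replacement: RemoveCacheSketch and removeCache are made-up names, and it assumes an HBase 1.x classpath and that the generated protobuf classes live in org.apache.phoenix.coprocessor.generated.ServerCachingProtos, with the table and cacheId supplied by the caller.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheRequest;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheResponse;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService;

public class RemoveCacheSketch {

    /**
     * Invokes the ServerCachingService endpoint on the region(s) spanned by [key, key]
     * (i.e. the single region containing key) and returns the per-region responses.
     */
    static Map<byte[], RemoveServerCacheResponse> removeCache(HTableInterface table, byte[] key,
            final byte[] cacheId) throws Throwable {
        return table.coprocessorService(ServerCachingService.class, key, key,
                new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
                    @Override
                    public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
                        // The controller records any exception thrown inside the endpoint on the region server.
                        ServerRpcController controller = new ServerRpcController();
                        // BlockingRpcCallback turns the asynchronous protobuf stub into a blocking call.
                        BlockingRpcCallback<RemoveServerCacheResponse> callback =
                                new BlockingRpcCallback<RemoveServerCacheResponse>();
                        RemoveServerCacheRequest request = RemoveServerCacheRequest.newBuilder()
                                .setCacheId(ByteStringer.wrap(cacheId))
                                .build();
                        instance.removeServerCache(controller, request, callback);
                        if (controller.getFailedOn() != null) {
                            // Surface the server-side failure to the caller of coprocessorService.
                            throw controller.getFailedOn();
                        }
                        return callback.get();
                    }
                });
    }
}

The generated stub call returns its result through the callback and reports failures through the controller rather than throwing, so checking getFailedOn() after the call is what lets a server-side exception propagate; in the method above that is how a failure ends up in lastThrowable via the surrounding catch.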