Use of org.apache.hadoop.hbase.client.AsyncRegionServerAdmin in project hbase by apache.
In the class RegionPlacementMaintainer, the method updateAssignmentPlanToRegionServers:
/**
 * Update the assignment plan to all the region servers
 * @param plan the new favored nodes assignment plan to push out
 * @throws IOException if the region assignment snapshot cannot be obtained
 */
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
  LOG.info("Start to update the region servers with the new assignment plan");
  // Get the region to region server map
  Map<ServerName, List<RegionInfo>> currentAssignment =
    this.getRegionAssignmentSnapshot().getRegionServerToRegionMap();
  // Keep track of the failed and succeeded updates
  int succeededNum = 0;
  Map<ServerName, Exception> failedUpdateMap = new HashMap<>();
  for (Map.Entry<ServerName, List<RegionInfo>> entry : currentAssignment.entrySet()) {
    List<Pair<RegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
    try {
      // Keep track of the favored updates for the current region server
      FavoredNodesPlan singleServerPlan = null;
      // Find out all the updates for the current region server
      for (RegionInfo region : entry.getValue()) {
        List<ServerName> favoredServerList = plan.getFavoredNodes(region);
        if (favoredServerList != null
          && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
          // Create the single server plan if necessary
          if (singleServerPlan == null) {
            singleServerPlan = new FavoredNodesPlan();
          }
          // Update the single server update
          singleServerPlan.updateFavoredNodesMap(region, favoredServerList);
          regionUpdateInfos.add(new Pair<>(region, favoredServerList));
        }
      }
      if (singleServerPlan != null) {
        // Update the current region server with its updated favored nodes
        AsyncRegionServerAdmin rsAdmin = getConnection().getRegionServerAdmin(entry.getKey());
        UpdateFavoredNodesRequest request =
          RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
        UpdateFavoredNodesResponse updateFavoredNodesResponse =
          FutureUtils.get(rsAdmin.updateFavoredNodes(request));
        LOG.info("Region server "
          + FutureUtils.get(rsAdmin.getServerInfo(RequestConverter.buildGetServerInfoRequest()))
            .getServerInfo()
          + " has updated " + updateFavoredNodesResponse.getResponse() + " / "
          + singleServerPlan.size() + " regions with the assignment plan");
        succeededNum++;
      }
    } catch (Exception e) {
      failedUpdateMap.put(entry.getKey(), e);
    }
  }
  // Log the succeeded updates
  LOG.info("Updated " + succeededNum + " region servers with the new assignment plan");
  // Log the failed updates
  int failedNum = failedUpdateMap.size();
  if (failedNum != 0) {
    LOG.error("Failed to update the following " + failedNum
      + " region servers with their corresponding favored nodes");
    for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) {
      LOG.error("Failed to update " + entry.getKey().getAddress() + " because of "
        + entry.getValue().getMessage());
    }
  }
}
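The blocking idiom above recurs throughout these examples: obtain an AsyncRegionServerAdmin for a specific server from the cluster connection, then resolve its CompletableFuture results with FutureUtils.get, which rethrows failures as IOException. A minimal stand-alone sketch of that idiom, assuming an AsyncClusterConnection named conn, a target ServerName serverName and a prepared regionUpdateInfos list are already in scope (all placeholder names):

// Sketch of the blocking call idiom, not part of the HBase source above.
// Assumes "conn", "serverName" and "regionUpdateInfos" are available in scope.
AsyncRegionServerAdmin rsAdmin = conn.getRegionServerAdmin(serverName);
UpdateFavoredNodesRequest request =
  RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
// FutureUtils.get blocks on the returned CompletableFuture and converts failures to IOException.
UpdateFavoredNodesResponse response = FutureUtils.get(rsAdmin.updateFavoredNodes(request));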
Use of org.apache.hadoop.hbase.client.AsyncRegionServerAdmin in project hbase by apache.
In the class AssignmentManagerUtil, the method getRegionInfoResponse:
static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
  final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow)
  throws IOException {
  AsyncRegionServerAdmin admin =
    env.getMasterServices().getAsyncClusterConnection().getRegionServerAdmin(regionLocation);
  GetRegionInfoRequest request = null;
  if (includeBestSplitRow) {
    request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName(), false, true);
  } else {
    request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName());
  }
  return FutureUtils.get(admin.getRegionInfo(request));
}
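The returned GetRegionInfoResponse can then be queried for splittability and the server-suggested split point, as the master does when preparing a region split. A hedged caller sketch, assuming env, regionLocation and regionInfo are already available (illustrative names only):

// Hypothetical caller sketch: ask the hosting region server for region details,
// including the best split row, and read the relevant protobuf fields.
GetRegionInfoResponse response = getRegionInfoResponse(env, regionLocation, regionInfo, true);
boolean splittable = response.hasSplittable() && response.getSplittable();
byte[] bestSplitRow =
  response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null;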
Use of org.apache.hadoop.hbase.client.AsyncRegionServerAdmin in project hbase by apache.
In the class HBaseInterClusterReplicationEndpoint, the method replicateEntries:
protected int replicateEntries(List<Entry> entries, int batchIndex, int timeout)
  throws IOException {
  SinkPeer sinkPeer = null;
  try {
    int entriesHashCode = System.identityHashCode(entries);
    if (LOG.isTraceEnabled()) {
      long size = entries.stream().mapToLong(this::getEstimatedEntrySize).sum();
      LOG.trace("{} Replicating batch {} of {} entries with total size {} bytes to {}",
        logPeerId(), entriesHashCode, entries.size(), size, replicationClusterId);
    }
    sinkPeer = getReplicationSink();
    AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer();
    try {
      ReplicationProtobufUtil.replicateWALEntry(rsAdmin,
        entries.toArray(new Entry[entries.size()]), replicationClusterId, baseNamespaceDir,
        hfileArchiveDir, timeout);
      if (LOG.isTraceEnabled()) {
        LOG.trace("{} Completed replicating batch {}", logPeerId(), entriesHashCode);
      }
    } catch (IOException e) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("{} Failed replicating batch {}", logPeerId(), entriesHashCode, e);
      }
      throw e;
    }
    reportSinkSuccess(sinkPeer);
  } catch (IOException ioe) {
    if (sinkPeer != null) {
      reportBadSink(sinkPeer);
    }
    throw ioe;
  }
  return batchIndex;
}
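Within the endpoint, replicateEntries is typically not called directly for a single batch but wrapped in tasks so several batches can be shipped in parallel. A rough illustrative sketch of that pattern, assuming an ExecutorService exec and a List<List<Entry>> batches (both placeholder names, not the endpoint's actual fields):

// Illustrative sketch only: submit one replication task per batch and wait for all of them.
List<Future<Integer>> futures = new ArrayList<>();
for (int i = 0; i < batches.size(); i++) {
  final int index = i;
  futures.add(exec.submit(() -> replicateEntries(batches.get(index), index, timeout)));
}
for (Future<Integer> f : futures) {
  try {
    f.get();
  } catch (InterruptedException | ExecutionException e) {
    // A failed or interrupted batch surfaces here; the real endpoint handles retries itself.
    throw new IOException(e);
  }
}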
Use of org.apache.hadoop.hbase.client.AsyncRegionServerAdmin in project hbase by apache.
In the class ServerManager, the method closeRegionSilentlyAndWait:
/**
 * Contacts a region server and waits up to timeout ms to close the region. This bypasses the
 * active hmaster. Pass -1 as timeout if you do not want to wait on the result.
 */
public static void closeRegionSilentlyAndWait(AsyncClusterConnection connection,
  ServerName server, RegionInfo region, long timeout) throws IOException, InterruptedException {
  AsyncRegionServerAdmin admin = connection.getRegionServerAdmin(server);
  try {
    FutureUtils.get(
      admin.closeRegion(ProtobufUtil.buildCloseRegionRequest(server, region.getRegionName())));
  } catch (IOException e) {
    LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
  }
  if (timeout < 0) {
    return;
  }
  long expiration = timeout + EnvironmentEdgeManager.currentTime();
  while (EnvironmentEdgeManager.currentTime() < expiration) {
    try {
      RegionInfo rsRegion = ProtobufUtil.toRegionInfo(FutureUtils
        .get(
          admin.getRegionInfo(RequestConverter.buildGetRegionInfoRequest(region.getRegionName())))
        .getRegionInfo());
      if (rsRegion == null) {
        return;
      }
    } catch (IOException ioe) {
      if (ioe instanceof NotServingRegionException
        || (ioe instanceof RemoteWithExtrasException
          && ((RemoteWithExtrasException) ioe)
            .unwrapRemoteException() instanceof NotServingRegionException)) {
        // No need to retry again
        return;
      }
      LOG.warn("Exception when retrieving regioninfo from: " + region.getRegionNameAsString(),
        ioe);
    }
    Thread.sleep(1000);
  }
  throw new IOException("Region " + region + " failed to close within timeout " + timeout);
}
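Since this is a public static helper, a caller only needs an AsyncClusterConnection, the hosting ServerName and the RegionInfo. A brief usage sketch with placeholder variable names and an example 30-second timeout:

// Hypothetical usage: close the region directly on its hosting server and wait
// up to 30 seconds for the server to stop reporting it.
ServerManager.closeRegionSilentlyAndWait(clusterConnection, hostingServer, regionInfo, 30000);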