Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by Apache: class MergeTableRegionsProcedure, method serializeStateData.
@Override
protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
  super.serializeStateData(serializer);
  final MasterProcedureProtos.MergeTableRegionsStateData.Builder mergeTableRegionsMsg =
    MasterProcedureProtos.MergeTableRegionsStateData.newBuilder()
      .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
      .setMergedRegionInfo(ProtobufUtil.toRegionInfo(mergedRegion))
      .setForcible(force);
  for (RegionInfo ri : regionsToMerge) {
    mergeTableRegionsMsg.addRegionInfo(ProtobufUtil.toRegionInfo(ri));
  }
  serializer.serialize(mergeTableRegionsMsg.build());
}
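For context, the state written here has to be readable again when the procedure is recovered. The following is a minimal sketch of what the matching deserializeStateData side could look like, reconstructed only from the fields used above (mergedRegion, regionsToMerge, force); it is an illustration, not a verbatim quote of the HBase source.

@Override
protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
  super.deserializeStateData(serializer);
  // Read back the protobuf message that serializeStateData wrote out.
  final MasterProcedureProtos.MergeTableRegionsStateData mergeTableRegionsMsg =
    serializer.deserialize(MasterProcedureProtos.MergeTableRegionsStateData.class);
  setUser(MasterProcedureUtil.toUserInfo(mergeTableRegionsMsg.getUserInfo()));
  // Rebuild the in-memory fields from the serialized state.
  mergedRegion = ProtobufUtil.toRegionInfo(mergeTableRegionsMsg.getMergedRegionInfo());
  regionsToMerge = new RegionInfo[mergeTableRegionsMsg.getRegionInfoCount()];
  for (int i = 0; i < regionsToMerge.length; i++) {
    regionsToMerge[i] = ProtobufUtil.toRegionInfo(mergeTableRegionsMsg.getRegionInfo(i));
  }
  force = mergeTableRegionsMsg.getForcible();
}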
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by Apache: class MasterRpcServices, method getRegionInfo.
/**
 * Get RegionInfo from Master using content of RegionSpecifier as key.
 * @return RegionInfo found by decoding <code>rs</code>, or null if none found
 */
private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws UnknownRegionException {
  RegionInfo ri = null;
  switch (rs.getType()) {
    case REGION_NAME:
      final byte[] regionName = rs.getValue().toByteArray();
      ri = this.server.getAssignmentManager().getRegionInfo(regionName);
      break;
    case ENCODED_REGION_NAME:
      String encodedRegionName = Bytes.toString(rs.getValue().toByteArray());
      RegionState regionState =
        this.server.getAssignmentManager().getRegionStates().getRegionState(encodedRegionName);
      ri = regionState == null
        ? this.server.getAssignmentManager().loadRegionFromMeta(encodedRegionName)
        : regionState.getRegion();
      break;
    default:
      break;
  }
  return ri;
}
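To show how the two branches of the switch are exercised, here is a hedged caller-side sketch that builds both specifier variants with the protobuf builder. The variables regionNameBytes and encodedName are placeholder inputs introduced for illustration, not values from the HBase source.

// Specifier keyed by the full region name bytes.
HBaseProtos.RegionSpecifier byName = HBaseProtos.RegionSpecifier.newBuilder()
  .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
  .setValue(ByteString.copyFrom(regionNameBytes))
  .build();
// Specifier keyed by the encoded region name (a hex string).
HBaseProtos.RegionSpecifier byEncodedName = HBaseProtos.RegionSpecifier.newBuilder()
  .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
  .setValue(ByteString.copyFromUtf8(encodedName))
  .build();
RegionInfo ri = getRegionInfo(byName);
if (ri == null) {
  // The helper returns null when neither lookup path finds the region.
  throw new UnknownRegionException(Bytes.toString(regionNameBytes));
}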
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by Apache: class MasterRpcServices, method unassignRegion.
@Override
public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
    throws ServiceException {
  try {
    final byte[] regionName = req.getRegion().getValue().toByteArray();
    RegionSpecifierType type = req.getRegion().getType();
    UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
    server.checkInitialized();
    if (type != RegionSpecifierType.REGION_NAME) {
      LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
        + " actual: " + type);
    }
    RegionStateNode rsn =
      server.getAssignmentManager().getRegionStates().getRegionStateNodeFromName(regionName);
    if (rsn == null) {
      throw new UnknownRegionException(Bytes.toString(regionName));
    }
    RegionInfo hri = rsn.getRegionInfo();
    if (server.cpHost != null) {
      server.cpHost.preUnassign(hri);
    }
    LOG.debug(server.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
      + " in current location if it is online");
    server.getAssignmentManager().unassign(hri);
    if (server.cpHost != null) {
      server.cpHost.postUnassign(hri);
    }
    return urr;
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
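For completeness, the following is a hedged sketch of the client side that typically ends up in this RPC, using the public Admin API. The table name is a placeholder, and depending on the HBase version the two-argument unassign overload may be deprecated in favour of unassign(byte[]).

try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = conn.getAdmin()) {
  TableName table = TableName.valueOf("my_table");   // placeholder table name
  for (RegionInfo region : admin.getRegions(table)) {
    // Ask the Master to unassign the region from its current server;
    // the AssignmentManager will then re-open it elsewhere.
    admin.unassign(region.getRegionName(), false);
  }
}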
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by Apache: class RegionPlacementMaintainer, method updateAssignmentPlanToRegionServers.
/**
 * Update the assignment plan on all the region servers.
 * @param plan the new favored nodes assignment plan to push out
 * @throws IOException if the current assignment snapshot cannot be read
 */
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
  LOG.info("Start to update the region servers with the new assignment plan");
  // Get the region server to region map
  Map<ServerName, List<RegionInfo>> currentAssignment =
    this.getRegionAssignmentSnapshot().getRegionServerToRegionMap();
  // Keep track of the failed and succeeded updates
  int succeededNum = 0;
  Map<ServerName, Exception> failedUpdateMap = new HashMap<>();
  for (Map.Entry<ServerName, List<RegionInfo>> entry : currentAssignment.entrySet()) {
    List<Pair<RegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
    try {
      // Keep track of the favored node updates for the current region server
      FavoredNodesPlan singleServerPlan = null;
      // Find out all the updates for the current region server
      for (RegionInfo region : entry.getValue()) {
        List<ServerName> favoredServerList = plan.getFavoredNodes(region);
        if (favoredServerList != null
          && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
          // Create the single server plan if necessary
          if (singleServerPlan == null) {
            singleServerPlan = new FavoredNodesPlan();
          }
          // Record the update for this region
          singleServerPlan.updateFavoredNodesMap(region, favoredServerList);
          regionUpdateInfos.add(new Pair<>(region, favoredServerList));
        }
      }
      if (singleServerPlan != null) {
        // Update the current region server with its updated favored nodes
        AsyncRegionServerAdmin rsAdmin = getConnection().getRegionServerAdmin(entry.getKey());
        UpdateFavoredNodesRequest request =
          RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
        UpdateFavoredNodesResponse updateFavoredNodesResponse =
          FutureUtils.get(rsAdmin.updateFavoredNodes(request));
        LOG.info("Region server "
          + FutureUtils.get(rsAdmin.getServerInfo(RequestConverter.buildGetServerInfoRequest()))
            .getServerInfo()
          + " has updated " + updateFavoredNodesResponse.getResponse() + " / "
          + singleServerPlan.size() + " regions with the assignment plan");
        succeededNum++;
      }
    } catch (Exception e) {
      failedUpdateMap.put(entry.getKey(), e);
    }
  }
  // Log the succeeded updates
  LOG.info("Updated " + succeededNum + " region servers with the new assignment plan");
  // Log the failed updates
  int failedNum = failedUpdateMap.size();
  if (failedNum != 0) {
    LOG.error("Failed to update the following " + failedNum
      + " region servers with their corresponding favored nodes");
    for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) {
      LOG.error("Failed to update " + entry.getKey().getAddress() + " because of "
        + entry.getValue().getMessage());
    }
  }
}
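The method only pushes regions whose favored node list has exactly FavoredNodeAssignmentHelper.FAVORED_NODES_NUM (three) entries. The following hedged sketch shows how such a plan could be assembled before calling the method; regionsOfInterest and the server names are placeholders introduced for illustration.

FavoredNodesPlan plan = new FavoredNodesPlan();
for (RegionInfo region : regionsOfInterest) {   // hypothetical collection of regions
  // Primary, secondary, and tertiary favored nodes for this region (placeholder servers).
  List<ServerName> favored = Arrays.asList(
    ServerName.valueOf("rs1.example.com", 16020, 1L),
    ServerName.valueOf("rs2.example.com", 16020, 1L),
    ServerName.valueOf("rs3.example.com", 16020, 1L));
  plan.updateFavoredNodesMap(region, favored);
}
// Push the plan out; servers whose update fails end up in the error log above.
updateAssignmentPlanToRegionServers(plan);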
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by Apache: class RegionPlacementMaintainer, method getRegionsMovement.
/**
 * Return how many regions will move per table because their primary region server will change.
 * @param newPlan the new assignment plan
 * @return how many primaries will move per table
 */
public Map<TableName, Integer> getRegionsMovement(FavoredNodesPlan newPlan) throws IOException {
  Map<TableName, Integer> movesPerTable = new HashMap<>();
  SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
  Map<TableName, List<RegionInfo>> tableToRegions = snapshot.getTableToRegionMap();
  FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan();
  Set<TableName> tables = snapshot.getTableSet();
  for (TableName table : tables) {
    int movedPrimaries = 0;
    if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) {
      continue;
    }
    List<RegionInfo> regions = tableToRegions.get(table);
    for (RegionInfo region : regions) {
      List<ServerName> oldServers = oldPlan.getFavoredNodes(region);
      List<ServerName> newServers = newPlan.getFavoredNodes(region);
      if (oldServers != null && newServers != null) {
        // The primary is the first entry in the favored nodes list.
        ServerName oldPrimary = oldServers.get(0);
        ServerName newPrimary = newServers.get(0);
        if (oldPrimary.compareTo(newPrimary) != 0) {
          movedPrimaries++;
        }
      }
    }
    movesPerTable.put(table, movedPrimaries);
  }
  return movesPerTable;
}
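A hedged usage sketch of the method above: report how many primary assignments would move under a candidate plan. The variables maintainer and candidatePlan are assumed to exist in the caller's context.

Map<TableName, Integer> moves = maintainer.getRegionsMovement(candidatePlan);
int total = 0;
for (Map.Entry<TableName, Integer> entry : moves.entrySet()) {
  System.out.println("Table " + entry.getKey().getNameAsString()
    + ": " + entry.getValue() + " primary region(s) would move");
  total += entry.getValue();
}
System.out.println("Total primaries moving: " + total);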