Use of org.apache.hadoop.hbase.ServerName in project hbase by Apache.
Class ServerManager, method sendFavoredNodes.
/**
 * Sends an RPC to the given region server asking it to update its favored-nodes
 * mapping for the supplied regions.
 * <p>
 * If no RPC connection to the server can be obtained, the update is skipped and a
 * warning is logged (best-effort semantics, preserved from the original contract).
 * @param server the region server to notify
 * @param favoredNodes map from region to its ordered list of favored-node server names
 * @throws IOException if the remote updateFavoredNodes call fails
 */
public void sendFavoredNodes(final ServerName server, Map<HRegionInfo, List<ServerName>> favoredNodes) throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    // No connection to this server: log and return rather than throw.
    // (server is concatenated directly; the explicit toString() was redundant.)
    LOG.warn("Attempting to send favored nodes update rpc to server " + server
        + " failed because no RPC connection found to this server");
  } else {
    // Flatten the map into the pair list the protobuf request builder expects.
    List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
    for (Entry<HRegionInfo, List<ServerName>> entry : favoredNodes.entrySet()) {
      regionUpdateInfos.add(new Pair<>(entry.getKey(), entry.getValue()));
    }
    UpdateFavoredNodesRequest request =
        RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
    try {
      admin.updateFavoredNodes(null, request);
    } catch (ServiceException se) {
      // Unwrap the protobuf ServiceException into the underlying IOException.
      throw ProtobufUtil.getRemoteException(se);
    }
  }
}
Use of org.apache.hadoop.hbase.ServerName in project hbase by Apache.
Class ServerManager, method findServerWithSameHostnamePortWithLock.
/**
 * Looks for an online server registered under the same hostname and port as the
 * given server name, ignoring the start code. Assumes the caller already holds
 * the lock on {@code onlineServers}.
 * @param serverName the server whose hostname and port should be matched
 * @return the matching online ServerName, or null if none is registered
 */
private ServerName findServerWithSameHostnamePortWithLock(final ServerName serverName) {
  // Build the largest possible ServerName for this hostname/port: every entry
  // with the same hostname/port but a real start code sorts strictly below it.
  final ServerName upperBound =
      ServerName.valueOf(serverName.getHostname(), serverName.getPort(), Long.MAX_VALUE);
  final ServerName candidate = onlineServers.lowerKey(upperBound);
  return (candidate != null && ServerName.isSameHostnameAndPort(candidate, serverName))
      ? candidate
      : null;
}
Use of org.apache.hadoop.hbase.ServerName in project hbase by Apache.
Class ServerManager, method createDestinationServersList.
/**
 * Creates a list of possible destinations for a region. It contains the online servers, but not
 * the draining or dying servers.
 * @param serverToExclude can be null if there is no server to exclude
 */
public List<ServerName> createDestinationServersList(final ServerName serverToExclude) {
  final List<ServerName> candidates = getOnlineServersList();
  if (serverToExclude != null) {
    candidates.remove(serverToExclude);
  }
  // Drop every currently-draining server from the candidate list.
  for (final ServerName draining : getDrainingServersList()) {
    candidates.remove(draining);
  }
  // Drop servers that are dead but whose expiration has not been processed yet.
  removeDeadNotExpiredServers(candidates);
  return candidates;
}
Use of org.apache.hadoop.hbase.ServerName in project hbase by Apache.
Class ServerManager, method letRegionServersShutdown.
/**
 * Blocks until the online-servers map drains to empty, or until only the master's
 * own entry remains (in memory or in ZooKeeper), or until ZooKeeper cannot be
 * queried. Logs the set of still-online servers at most once per second.
 */
void letRegionServersShutdown() {
  long previousLogTime = 0;
  ServerName sn = master.getServerName();
  ZooKeeperWatcher zkw = master.getZooKeeper();
  int onlineServersCt;
  while ((onlineServersCt = onlineServers.size()) > 0) {
    // Throttle the progress log to at most once per second.
    if (System.currentTimeMillis() > (previousLogTime + 1000)) {
      Set<ServerName> remainingServers = onlineServers.keySet();
      synchronized (onlineServers) {
        // If the master itself is the only remaining entry, it will remove
        // its own registration later; nothing left to wait for.
        if (remainingServers.size() == 1 && remainingServers.contains(sn)) {
          // Master will delete itself later.
          return;
        }
      }
      StringBuilder sb = new StringBuilder();
      // It's ok here to not sync on onlineServers - merely logging
      for (ServerName key : remainingServers) {
        if (sb.length() > 0) {
          sb.append(", ");
        }
        sb.append(key);
      }
      LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
      previousLogTime = System.currentTimeMillis();
    }
    try {
      List<String> servers = getRegionServersInZK(zkw);
      // Cross-check with ZK: if no server znodes remain, or only this master's
      // znode does, stop waiting — the in-memory map may have missed ZK events.
      if (servers == null || servers.isEmpty() || (servers.size() == 1 && servers.contains(sn.toString()))) {
        LOG.info("ZK shows there is only the master self online, exiting now");
        // Master could have lost some ZK events, no need to wait more.
        break;
      }
    } catch (KeeperException ke) {
      LOG.warn("Failed to list regionservers", ke);
      // ZK is malfunctioning, don't hang here
      break;
    }
    synchronized (onlineServers) {
      try {
        // Only wait if the size is unchanged since the top of the loop;
        // the 100 ms timeout keeps us polling even without a notify.
        if (onlineServersCt == onlineServers.size())
          onlineServers.wait(100);
      } catch (InterruptedException ignored) {
        // continue
      }
    }
  }
}
Use of org.apache.hadoop.hbase.ServerName in project hbase by Apache.
Class SplitLogManager, method removeStaleRecoveringRegions.
/**
 * It removes stale recovering regions under /hbase/recovering-regions/[encoded region name]
 * during master initialization phase.
 * @param failedServers A set of known failed servers
 * @throws IOException
 */
void removeStaleRecoveringRegions(final Set<ServerName> failedServers) throws IOException, InterruptedIOException {
  // Convert the ServerName set into its string form for the coordination layer.
  final Set<String> failedServerNames = new HashSet<>();
  if (failedServers != null) {
    for (final ServerName failed : failedServers) {
      failedServerNames.add(failed.getServerName());
    }
  }
  this.recoveringRegionLock.lock();
  try {
    getSplitLogManagerCoordination().removeStaleRecoveringRegions(failedServerNames);
  } finally {
    // Always release, even if the coordination call throws.
    this.recoveringRegionLock.unlock();
  }
}
Aggregations