Search in sources :

Example 26 with ServerName

use of org.apache.hadoop.hbase.ServerName in project hbase by apache.

From the class MasterStatusServlet, method doGet:

/**
 * Renders the master status page.
 * <p>
 * Pulls the {@link HMaster} out of the servlet context, gathers fragmentation info,
 * and — only when this master is the active one — the meta location plus the online
 * and dead server lists, then hands everything to {@link MasterStatusTmpl} to render.
 *
 * @param request  the incoming HTTP request; optional "filter" and "format" parameters
 *                 are forwarded to the template when present
 * @param response the HTTP response the rendered page is written to
 * @throws IOException if writing the response fails
 */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
    HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER);
    assert master != null : "No Master in context!";
    response.setContentType("text/html");

    Configuration conf = master.getConfiguration();
    Map<String, Integer> fragmentation = getFragmentationInfo(master, conf);

    // Cluster-wide state is only available (and meaningful) on the active master.
    ServerName metaLocation = null;
    List<ServerName> onlineServers = null;
    Set<ServerName> deadServers = null;
    if (master.isActiveMaster()) {
        metaLocation = getMetaLocationOrNull(master);
        ServerManager serverManager = master.getServerManager();
        if (serverManager != null) {
            deadServers = serverManager.getDeadServers().copyServerNames();
            onlineServers = serverManager.getOnlineServersList();
        }
    }

    MasterStatusTmpl tmpl = new MasterStatusTmpl()
        .setFrags(fragmentation)
        .setMetaLocation(metaLocation)
        .setServers(onlineServers)
        .setDeadServers(deadServers)
        .setCatalogJanitorEnabled(master.isCatalogJanitorEnabled());

    // Optional display controls supplied by the caller.
    String filter = request.getParameter("filter");
    if (filter != null) {
        tmpl.setFilter(filter);
    }
    String format = request.getParameter("format");
    if (format != null) {
        tmpl.setFormat(format);
    }

    tmpl.render(response.getWriter(), master);
}
Also used : MasterStatusTmpl(org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl) Configuration(org.apache.hadoop.conf.Configuration) ServerName(org.apache.hadoop.hbase.ServerName)

Example 27 with ServerName

use of org.apache.hadoop.hbase.ServerName in project hbase by apache.

From the class MasterWalManager, method getFailedServersFromLogFolders:

/**
   * Inspect the log directory to find dead servers which need recovery work
   * @return A set of ServerNames which aren't running but still have WAL files left in file system
   */
Set<ServerName> getFailedServersFromLogFolders() {
    // Retry on IO errors unless the operator explicitly asked to skip split errors.
    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors", WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
    Set<ServerName> serverNames = new HashSet<>();
    Path logsDirPath = new Path(this.rootDir, HConstants.HREGION_LOGDIR_NAME);
    do {
        // Bail out promptly if the master is shutting down mid-scan.
        if (services.isStopped()) {
            LOG.warn("Master stopped while trying to get failed servers.");
            break;
        }
        try {
            // No WAL dir at all means there is nothing to recover.
            if (!this.fs.exists(logsDirPath))
                return serverNames;
            FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
            // Get online servers after getting log folders to avoid log folder deletion of newly
            // checked-in region servers; see HBASE-5916.
            Set<ServerName> onlineServers = services.getServerManager().getOnlineServers().keySet();
            if (logFolders == null || logFolders.length == 0) {
                LOG.debug("No log files to split, proceeding...");
                return serverNames;
            }
            for (FileStatus status : logFolders) {
                FileStatus[] curLogFiles = FSUtils.listStatus(this.fs, status.getPath(), null);
                if (curLogFiles == null || curLogFiles.length == 0) {
                    // Empty log folder. No recovery needed
                    continue;
                }
                // Derive the owning server from the WAL directory name; null means the
                // folder does not follow the expected naming scheme.
                final ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(status.getPath());
                if (null == serverName) {
                    LOG.warn("Log folder " + status.getPath() + " doesn't look like its name includes a " + "region server name; leaving in place. If you see later errors about missing " + "write ahead logs they may be saved in this location.");
                } else if (!onlineServers.contains(serverName)) {
                    // Folder belongs to a server that is not online: it needs log splitting.
                    LOG.info("Log folder " + status.getPath() + " doesn't belong " + "to a known region server, splitting");
                    serverNames.add(serverName);
                } else {
                    LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
                }
            }
            // Scan completed cleanly; do not loop again.
            retrySplitting = false;
        } catch (IOException ioe) {
            LOG.warn("Failed getting failed servers to be recovered.", ioe);
            // A dead filesystem is unrecoverable here: halt the process outright.
            if (!checkFileSystem()) {
                LOG.warn("Bad Filesystem, exiting");
                Runtime.getRuntime().halt(1);
            }
            try {
                // Back off before retrying the scan (default 30s, configurable).
                if (retrySplitting) {
                    Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
                }
            } catch (InterruptedException e) {
                // Cannot safely return without splitting; restore the interrupt flag and halt.
                LOG.warn("Interrupted, aborting since cannot return w/o splitting");
                Thread.currentThread().interrupt();
                retrySplitting = false;
                Runtime.getRuntime().halt(1);
            }
        }
    } while (retrySplitting);
    return serverNames;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ServerName(org.apache.hadoop.hbase.ServerName) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) HashSet(java.util.HashSet)

Example 28 with ServerName

use of org.apache.hadoop.hbase.ServerName in project hbase by apache.

From the class RackManager, method getRack:

/**
   * Same as {@link #getRack(ServerName)} except that a list is passed
   * @param servers list of servers we're requesting racks information for
   * @return list of racks for the given list of servers
   */
public List<String> getRack(List<ServerName> servers) {
    // Resolve all hostnames in a single batch; switchMapping is expected to cache
    // results (or at least keep resolution lightweight).
    List<String> hostnames = new ArrayList<>(servers.size());
    for (ServerName server : servers) {
        hostnames.add(server.getHostname());
    }
    return switchMapping.resolve(hostnames);
}
Also used : ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList)

Example 29 with ServerName

use of org.apache.hadoop.hbase.ServerName in project hbase by apache.

From the class RegionPlacementMaintainer, method updateAssignmentPlanToRegionServers:

/**
   * Update the assignment plan to all the region servers
   * @param plan the favored-nodes plan to push out to each region server
   * @throws IOException if the current region assignment snapshot cannot be read
   */
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
    LOG.info("Start to update the region servers with the new assignment plan");
    // Get the region to region server map
    Map<ServerName, List<HRegionInfo>> currentAssignment = this.getRegionAssignmentSnapshot().getRegionServerToRegionMap();
    // Track the failed and succeeded updates
    int succeededNum = 0;
    Map<ServerName, Exception> failedUpdateMap = new HashMap<>();
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : currentAssignment.entrySet()) {
        List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
        try {
            // Keep track of the favored updates for the current region server
            FavoredNodesPlan singleServerPlan = null;
            // Find out all the updates for the current region server
            for (HRegionInfo region : entry.getValue()) {
                List<ServerName> favoredServerList = plan.getFavoredNodes(region);
                if (favoredServerList != null && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
                    // Lazily create the single-server plan on the first qualifying region
                    if (singleServerPlan == null) {
                        singleServerPlan = new FavoredNodesPlan();
                    }
                    // Record this region's favored nodes in the per-server plan
                    singleServerPlan.updateFavoredNodesMap(region, favoredServerList);
                    regionUpdateInfos.add(new Pair<>(region, favoredServerList));
                }
            }
            if (singleServerPlan != null) {
                // Push the updated favored nodes to the current region server
                BlockingInterface currentRegionServer = ((ClusterConnection) this.connection).getAdmin(entry.getKey());
                UpdateFavoredNodesRequest request = RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
                UpdateFavoredNodesResponse updateFavoredNodesResponse = currentRegionServer.updateFavoredNodes(null, request);
                LOG.info("Region server " + ProtobufUtil.getServerInfo(null, currentRegionServer).getServerName() + " has updated " + updateFavoredNodesResponse.getResponse() + " / " + singleServerPlan.getAssignmentMap().size() + " regions with the assignment plan");
                succeededNum++;
            }
        } catch (Exception e) {
            // Collect per-server failures so one bad server doesn't abort the whole push
            failedUpdateMap.put(entry.getKey(), e);
        }
    }
    // log the succeeded updates
    LOG.info("Updated " + succeededNum + " region servers with the new assignment plan");
    // log the failed updates
    int failedNum = failedUpdateMap.size();
    if (failedNum != 0) {
        // Fixed: the message previously contained a stray literal "+ " before the count
        LOG.error("Failed to update the following " + failedNum + " region servers with its corresponding favored nodes");
        for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) {
            // Pass the exception itself so the stack trace is preserved in the log,
            // instead of only the (possibly null) message
            LOG.error("Failed to update " + entry.getKey().getHostAndPort() + " because of " + entry.getValue().getMessage(), entry.getValue());
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) FavoredNodesPlan(org.apache.hadoop.hbase.favored.FavoredNodesPlan) IOException(java.io.IOException) ParseException(org.apache.commons.cli.ParseException) UpdateFavoredNodesResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) UpdateFavoredNodesRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) BlockingInterface(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap) Pair(org.apache.hadoop.hbase.util.Pair)

Example 30 with ServerName

use of org.apache.hadoop.hbase.ServerName in project hbase by apache.

From the class RegionPlacementMaintainer, method getRegionsMovement:

/**
   * Return how many regions will move per table since their primary RS will
   * change
   *
   * @param newPlan - new AssignmentPlan
   * @return how many primaries will move per table
   */
public Map<TableName, Integer> getRegionsMovement(FavoredNodesPlan newPlan) throws IOException {
    SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
    FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan();
    Map<TableName, List<HRegionInfo>> tableToRegions = snapshot.getTableToRegionMap();
    Map<TableName, Integer> movesPerTable = new HashMap<>();
    for (TableName table : snapshot.getTableSet()) {
        // Respect the target-table filter when one was supplied.
        if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) {
            continue;
        }
        int movedPrimaries = 0;
        for (HRegionInfo region : tableToRegions.get(table)) {
            List<ServerName> previousFavored = oldPlan.getFavoredNodes(region);
            List<ServerName> proposedFavored = newPlan.getFavoredNodes(region);
            // A region only counts when both plans know about it.
            if (previousFavored == null || proposedFavored == null) {
                continue;
            }
            // The primary is the first favored node; count it if it changes.
            if (previousFavored.get(0).compareTo(proposedFavored.get(0)) != 0) {
                movedPrimaries++;
            }
        }
        movesPerTable.put(table, movedPrimaries);
    }
    return movesPerTable;
}
Also used : HashMap(java.util.HashMap) FavoredNodesPlan(org.apache.hadoop.hbase.favored.FavoredNodesPlan) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List)

Aggregations

ServerName (org.apache.hadoop.hbase.ServerName)426 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)202 Test (org.junit.Test)163 ArrayList (java.util.ArrayList)97 TableName (org.apache.hadoop.hbase.TableName)89 IOException (java.io.IOException)87 HashMap (java.util.HashMap)81 List (java.util.List)72 Map (java.util.Map)54 HRegionLocation (org.apache.hadoop.hbase.HRegionLocation)45 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)34 Table (org.apache.hadoop.hbase.client.Table)33 HashSet (java.util.HashSet)32 TreeMap (java.util.TreeMap)31 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)29 Configuration (org.apache.hadoop.conf.Configuration)26 HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer)26 Pair (org.apache.hadoop.hbase.util.Pair)24 KeeperException (org.apache.zookeeper.KeeperException)23 InterruptedIOException (java.io.InterruptedIOException)22