Search in sources :

Example 1 with HbckRegionInfo

use of org.apache.hadoop.hbase.util.HbckRegionInfo in project hbase by apache.

the class HbckChore method loadRegionsFromFS.

private void loadRegionsFromFS(final HashSet<String> mergedParentRegions) throws IOException {
    Path rootDir = master.getMasterFileSystem().getRootDir();
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    int numRegions = 0;
    List<Path> tableDirs = FSUtils.getTableDirs(fs, rootDir);
    for (Path tableDir : tableDirs) {
        List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
        for (Path regionDir : regionDirs) {
            String encodedRegionName = regionDir.getName();
            if (encodedRegionName == null) {
                LOG.warn("Failed get of encoded name from {}", regionDir);
                continue;
            }
            HbckRegionInfo hri = regionInfoMap.get(encodedRegionName);
            // If it is not in the in-memory map and not a merged region, report it as an orphan region.
            if (hri == null && !mergedParentRegions.contains(encodedRegionName)) {
                orphanRegionsOnFS.put(encodedRegionName, regionDir);
                continue;
            }
        }
        numRegions += regionDirs.size();
    }
    LOG.info("Loaded {} tables {} regions from filesystem and found {} orphan regions", tableDirs.size(), numRegions, orphanRegionsOnFS.size());
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) HbckRegionInfo(org.apache.hadoop.hbase.util.HbckRegionInfo)
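
For context, the snippet above flags a region directory as an orphan when its encoded name is neither in the in-memory regionInfoMap nor a merged parent. Below is a minimal, HBase-free sketch of that same pass, assuming a local directory layout of <root>/<table>/<encodedRegionName>; the class and method names here are illustrative only, not part of the HBase API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class OrphanRegionDirScan {

    /** Returns encodedRegionName -> region dir for directories unknown to the in-memory map. */
    static Map<String, Path> findOrphans(Path rootDir, Set<String> knownEncodedNames,
            Set<String> mergedParentRegions) throws IOException {
        Map<String, Path> orphans = new HashMap<>();
        try (Stream<Path> tableDirs = Files.list(rootDir)) {
            for (Path tableDir : tableDirs.filter(Files::isDirectory).collect(Collectors.toList())) {
                try (Stream<Path> regionDirs = Files.list(tableDir)) {
                    for (Path regionDir : regionDirs.filter(Files::isDirectory).collect(Collectors.toList())) {
                        String encodedRegionName = regionDir.getFileName().toString();
                        // Not known in memory and not a merged parent -> report it as an orphan.
                        if (!knownEncodedNames.contains(encodedRegionName)
                            && !mergedParentRegions.contains(encodedRegionName)) {
                            orphans.put(encodedRegionName, regionDir);
                        }
                    }
                }
            }
        }
        return orphans;
    }

    public static void main(String[] args) throws IOException {
        Path root = Paths.get(args.length > 0 ? args[0] : ".");
        Map<String, Path> orphans = findOrphans(root, Set.of("abc123"), Set.of());
        orphans.forEach((name, dir) -> System.out.println("orphan " + name + " at " + dir));
    }
}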

Example 2 with HbckRegionInfo

use of org.apache.hadoop.hbase.util.HbckRegionInfo in project hbase by apache.

the class HbckChore method loadRegionsFromInMemoryState.

private void loadRegionsFromInMemoryState() {
    List<RegionState> regionStates = master.getAssignmentManager().getRegionStates().getRegionStates();
    for (RegionState regionState : regionStates) {
        RegionInfo regionInfo = regionState.getRegion();
        if (master.getTableStateManager().isTableState(regionInfo.getTable(), TableState.State.DISABLED)) {
            disabledTableRegions.add(regionInfo.getRegionNameAsString());
        }
        // Check both state and regioninfo for split status, see HBASE-26383
        if (regionState.isSplit() || regionInfo.isSplit()) {
            splitParentRegions.add(regionInfo.getRegionNameAsString());
        }
        HbckRegionInfo.MetaEntry metaEntry = new HbckRegionInfo.MetaEntry(regionInfo, regionState.getServerName(), regionState.getStamp());
        regionInfoMap.put(regionInfo.getEncodedName(), new HbckRegionInfo(metaEntry));
    }
    LOG.info("Loaded {} regions ({} disabled, {} split parents) from in-memory state", regionStates.size(), disabledTableRegions.size(), splitParentRegions.size());
    if (LOG.isDebugEnabled()) {
        Map<RegionState.State, Integer> stateCountMap = new HashMap<>();
        for (RegionState regionState : regionStates) {
            stateCountMap.compute(regionState.getState(), (k, v) -> (v == null) ? 1 : v + 1);
        }
        StringBuffer sb = new StringBuffer();
        sb.append("Regions by state: ");
        stateCountMap.entrySet().forEach(e -> {
            sb.append(e.getKey());
            sb.append('=');
            sb.append(e.getValue());
            sb.append(' ');
        });
        LOG.debug(sb.toString());
    }
    if (LOG.isTraceEnabled()) {
        for (RegionState regionState : regionStates) {
            LOG.trace("{}: {}, serverName={}", regionState.getRegion(), regionState.getState(), regionState.getServerName());
        }
    }
}
Also used : HashMap(java.util.HashMap) TableState(org.apache.hadoop.hbase.client.TableState) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HbckRegionInfo(org.apache.hadoop.hbase.util.HbckRegionInfo)
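
The debug branch above tallies regions per RegionState.State with Map.compute before logging a one-line summary. Here is a small standalone sketch of that counting pattern, assuming a plain enum in place of RegionState.State; all names are illustrative.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class StateCountSummary {

    enum State { OPEN, OPENING, CLOSED, SPLIT }

    public static void main(String[] args) {
        List<State> regionStates =
            List.of(State.OPEN, State.OPEN, State.OPENING, State.SPLIT, State.OPEN);

        // Same pattern as the chore: Map.compute to tally occurrences per state.
        Map<State, Integer> stateCountMap = new HashMap<>();
        for (State s : regionStates) {
            stateCountMap.compute(s, (k, v) -> (v == null) ? 1 : v + 1);
        }

        StringBuilder sb = new StringBuilder("Regions by state: ");
        stateCountMap.forEach((state, count) -> sb.append(state).append('=').append(count).append(' '));
        System.out.println(sb.toString().trim());  // e.g. "Regions by state: OPEN=3 OPENING=1 SPLIT=1"
    }
}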

Example 3 with HbckRegionInfo

use of org.apache.hadoop.hbase.util.HbckRegionInfo in project hbase by apache.

the class HbckChore method loadRegionsFromRSReport.

private void loadRegionsFromRSReport() {
    int numRegions = 0;
    Map<ServerName, Set<byte[]>> rsReports = master.getAssignmentManager().getRSReports();
    for (Map.Entry<ServerName, Set<byte[]>> entry : rsReports.entrySet()) {
        ServerName serverName = entry.getKey();
        for (byte[] regionName : entry.getValue()) {
            String encodedRegionName = RegionInfo.encodeRegionName(regionName);
            HbckRegionInfo hri = regionInfoMap.get(encodedRegionName);
            if (hri == null) {
                orphanRegionsOnRS.put(RegionInfo.getRegionNameAsString(regionName), serverName);
                continue;
            }
            hri.addServer(hri.getMetaEntry().getRegionInfo(), serverName);
        }
        numRegions += entry.getValue().size();
    }
    LOG.info("Loaded {} regions from {} regionservers' reports and found {} orphan regions", numRegions, rsReports.size(), orphanRegionsOnRS.size());
    for (Map.Entry<String, HbckRegionInfo> entry : regionInfoMap.entrySet()) {
        HbckRegionInfo hri = entry.getValue();
        ServerName locationInMeta = hri.getMetaEntry().getRegionServer();
        if (locationInMeta == null) {
            continue;
        }
        if (hri.getDeployedOn().size() == 0) {
            // Skip offline regions that belong to a disabled table.
            if (disabledTableRegions.contains(hri.getRegionNameAsString())) {
                continue;
            }
            // Skip split parent regions.
            if (splitParentRegions.contains(hri.getRegionNameAsString())) {
                continue;
            }
            // Master thought this region opened, but no regionserver reported it.
            inconsistentRegions.put(hri.getRegionNameAsString(), new Pair<>(locationInMeta, new LinkedList<>()));
        } else if (hri.getDeployedOn().size() > 1) {
            // More than one regionserver reported this region as open.
            inconsistentRegions.put(hri.getRegionNameAsString(), new Pair<>(locationInMeta, hri.getDeployedOn()));
        } else if (!hri.getDeployedOn().get(0).equals(locationInMeta)) {
            // Master thought this region opened on Server1, but regionserver reported Server2
            inconsistentRegions.put(hri.getRegionNameAsString(), new Pair<>(locationInMeta, hri.getDeployedOn()));
        }
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) LinkedList(java.util.LinkedList) ServerName(org.apache.hadoop.hbase.ServerName) HashMap(java.util.HashMap) Map(java.util.Map) HbckRegionInfo(org.apache.hadoop.hbase.util.HbckRegionInfo) Pair(org.apache.hadoop.hbase.util.Pair)
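
The second loop above marks a region as inconsistent in three cases: meta says it is open but no regionserver reported it, more than one regionserver reported it, or the reporting server differs from the one recorded in meta. The following is a simplified, HBase-free sketch of those three checks (it omits the disabled-table and split-parent skips); the record and method names are assumptions for illustration.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RegionConsistencyCheck {

    /** Location recorded in meta plus the servers that actually reported the region. */
    record Inconsistency(String locationInMeta, List<String> deployedOn) {}

    static Map<String, Inconsistency> check(Map<String, String> locationInMeta,
            Map<String, List<String>> deployedOn) {
        Map<String, Inconsistency> inconsistent = new HashMap<>();
        for (Map.Entry<String, String> e : locationInMeta.entrySet()) {
            String region = e.getKey();
            String metaServer = e.getValue();
            List<String> servers = deployedOn.getOrDefault(region, List.of());
            if (servers.isEmpty()) {
                // Master thinks the region is open, but no regionserver reported it.
                inconsistent.put(region, new Inconsistency(metaServer, List.of()));
            } else if (servers.size() > 1) {
                // More than one regionserver reported the region as open.
                inconsistent.put(region, new Inconsistency(metaServer, servers));
            } else if (!servers.get(0).equals(metaServer)) {
                // Reported on a different server than meta records.
                inconsistent.put(region, new Inconsistency(metaServer, servers));
            }
        }
        return inconsistent;
    }

    public static void main(String[] args) {
        Map<String, String> meta = Map.of("r1", "rs1", "r2", "rs2", "r3", "rs3");
        Map<String, List<String>> reports = Map.of("r1", List.of("rs1"),
            "r2", List.of("rs2", "rs9"),
            "r3", List.of("rs4"));
        check(meta, reports).forEach((r, i) -> System.out.println(r + " -> " + i));
    }
}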

Aggregations

HbckRegionInfo (org.apache.hadoop.hbase.util.HbckRegionInfo) 3
HashMap (java.util.HashMap) 2
HashSet (java.util.HashSet) 1
LinkedList (java.util.LinkedList) 1
Map (java.util.Map) 1
Set (java.util.Set) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
Path (org.apache.hadoop.fs.Path) 1
ServerName (org.apache.hadoop.hbase.ServerName) 1
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 1
TableState (org.apache.hadoop.hbase.client.TableState) 1
Pair (org.apache.hadoop.hbase.util.Pair) 1