Usage of org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler in the Apache HBase project.
From the class HBaseFsck, method suggestFixes.
/**
 * Runs the region-chain integrity check over every table, reporting suggested
 * (but not applied) fixes for any problems found.
 * @param tablesInfo the tables to examine, keyed by table name
 * @throws IOException if checking a table's region chain fails
 */
private void suggestFixes(SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  logParallelMerge();
  for (TableInfo tableInfo : tablesInfo.values()) {
    // IntegrityFixSuggester only records suggestions via the error reporter;
    // it never mutates anything on HDFS or in meta.
    tableInfo.checkRegionChain(tableInfo.new IntegrityFixSuggester(tableInfo, errors));
  }
}
Usage of org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler in the Apache HBase project.
From the class HBaseFsck, method checkIntegrity.
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @return map of table name to collected table info (also stored in {@code tablesInfo})
 * @throws IOException if checking a table's region chain fails
 */
SortedMap<TableName, HbckTableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckRegionInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.getMetaEntry() == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }
      // TODO test.
      continue;
    }
    if (hbi.getMetaEntry().regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.getMetaEntry().getRegionInfo().isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      // Fix: add the ": " separator (matching the sibling messages above) so the
      // region info is not fused onto the word "edits" in the log output.
      errors.detail("Skipping region because it only contains edits: " + hbi);
      continue;
    }
    // Only regions deployed on at least one server participate in the table check.
    if (hbi.getDeployedOn().isEmpty()) {
      continue;
    }
    // We should be safe here
    TableName tableName = hbi.getMetaEntry().getRegionInfo().getTable();
    // computeIfAbsent replaces the get/null-check/create dance and avoids the
    // original's redundant put() of an already-present entry on every iteration.
    HbckTableInfo modTInfo =
      tablesInfo.computeIfAbsent(tableName, tn -> new HbckTableInfo(tn, this));
    for (ServerName server : hbi.getDeployedOn()) {
      modTInfo.addServer(server);
    }
    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }
  }
  loadTableInfosForTablesWithNoRegion();
  logParallelMerge();
  for (HbckTableInfo tInfo : tablesInfo.values()) {
    // Suggest-only handler: reports problems, never repairs them.
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
Usage of org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler in the Apache HBase project.
From the class HBaseFsck, method checkHdfsIntegrity.
/**
 * Verifies the HDFS-derived region split map for every table, either repairing
 * problems (when fixing is enabled) or merely reporting suggested fixes.
 * @param fixHoles whether to actively repair holes in the region chain
 * @param fixOverlaps whether to actively repair overlapping regions
 * @return the map of table name to table info that was checked
 * @throws IOException if checking a table's region chain fails
 */
private SortedMap<TableName, HbckTableInfo> checkHdfsIntegrity(boolean fixHoles, boolean fixOverlaps) throws IOException {
  LOG.info("Checking HBase region split map from HDFS data...");
  logParallelMerge();
  boolean repairing = fixHoles || fixOverlaps;
  for (HbckTableInfo tableInfo : tablesInfo.values()) {
    // Actively fix (HDFSIntegrityFixer) when any repair flag is set,
    // otherwise only suggest fixes (IntegrityFixSuggester).
    TableIntegrityErrorHandler handler = repairing
      ? tableInfo.new HDFSIntegrityFixer(tableInfo, errors, getConf(), fixHoles, fixOverlaps)
      : tableInfo.new IntegrityFixSuggester(tableInfo, errors);
    boolean chainOk = tableInfo.checkRegionChain(handler);
    if (!chainOk) {
      // should dump info as well.
      errors.report("Found inconsistency in table " + tableInfo.getName());
    }
  }
  return tablesInfo;
}
Aggregations