Use of org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter in project hbase by apache.
Example: the checkTableDir method of the HFileCorruptionChecker class.
/**
 * Check all the region dirs in the specified tableDir.
 *
 * @param tableDir path to a table
 * @throws IOException
 */
void checkTableDir(Path tableDir) throws IOException {
  List<FileStatus> rds =
      FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
  if (rds == null) {
    if (!fs.exists(tableDir)) {
      LOG.warn("Table Directory " + tableDir
          + " does not exist. Likely due to concurrent delete. Skipping.");
      missing.add(tableDir);
    }
    return;
  }

  // Parallelize check at the region dir level
  List<RegionDirChecker> rdcs = new ArrayList<>(rds.size() + 1);
  List<Future<Void>> rdFutures;

  for (FileStatus rdFs : rds) {
    Path rdDir = rdFs.getPath();
    RegionDirChecker work = new RegionDirChecker(rdDir);
    rdcs.add(work);
  }

  // add mob region
  rdcs.add(createMobRegionDirChecker(tableDir));

  // Submit and wait for completion
  try {
    rdFutures = executor.invokeAll(rdcs);
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
    LOG.warn("Region dirs checking interrupted!", ie);
    return;
  }

  for (int i = 0; i < rdFutures.size(); i++) {
    Future<Void> f = rdFutures.get(i);
    try {
      f.get();
    } catch (ExecutionException e) {
      LOG.warn("Failed to quarantine an HFile in regiondir "
          + rdcs.get(i).regionDir, e.getCause());
      // rethrow IOExceptions
      if (e.getCause() instanceof IOException) {
        throw (IOException) e.getCause();
      }
      // rethrow RuntimeExceptions
      if (e.getCause() instanceof RuntimeException) {
        throw (RuntimeException) e.getCause();
      }
      // this should never happen
      LOG.error("Unexpected exception encountered", e);
      return; // bailing out
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      LOG.warn("Region dirs check interrupted!", ie);
      return; // bailing out
    }
  }
}
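For reference, below is a minimal standalone sketch of the same RegionDirFilter usage outside of HFileCorruptionChecker: it lists the region directories under a table directory and prints them. The table path and the locally created Configuration/FileSystem are illustrative assumptions, not part of the original snippet.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class ListRegionDirs {
  public static void main(String[] args) throws IOException {
    // Assumption: an HBase/Hadoop configuration is available on the classpath.
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical table directory; substitute a real path under the HBase root dir.
    Path tableDir = new Path("/hbase/data/default/mytable");

    // RegionDirFilter accepts only directories whose names look like region encodings,
    // so non-region entries such as .tabledesc or .tmp are skipped.
    List<FileStatus> regionDirs =
        FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs));

    // listStatusWithStatusFilter returns null when the directory is missing or empty,
    // which is why checkTableDir above handles the null case explicitly.
    if (regionDirs == null) {
      System.out.println("No region directories found under " + tableDir);
      return;
    }
    for (FileStatus rd : regionDirs) {
      System.out.println("Region dir: " + rd.getPath());
    }
  }
}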