Usage example of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker in the Apache HBase project, from class TestHBaseFsckOneRS, method testQuarantineMissingHFile.
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an hfile after the corruption checker learned about it.
 */
@Test(timeout = 180000)
public void testQuarantineMissingHFile() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Inject a fault into the HFileCorruptionChecker that hbck creates: delete the first
  // hfile it attempts to check, simulating a concurrent compaction/split removing it.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Guards so that only the very first hfile encountered gets deleted.
        AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);

        @Override
        protected void checkHFile(Path p) throws IOException {
          if (attemptedFirstHFile.compareAndSet(false, true)) {
            // Make sure the delete happened before the checker looks at the file.
            assertTrue(fs.delete(p, true));
          }
          super.checkHFile(p);
        }
      };
    }
  };
  try {
    // 4 hfiles attempted, but 1 missing (the one deleted by the injected fault).
    doQuarantineTest(tableName, hbck, 4, 0, 0, 0, 1);
  } finally {
    // Release hbck resources even when doQuarantineTest fails; the original code
    // leaked them on any assertion failure or exception.
    hbck.close();
  }
}
Usage example of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker in the Apache HBase project, from class TestHBaseFsckOneRS, method testQuarantineCorruptHFile.
/**
 * This creates a table and then corrupts an hfile. Hbck should quarantine the file.
 */
@Test(timeout = 180000)
public void testQuarantineCorruptHFile() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    setupTable(tableName);
    assertEquals(ROWKEYS.length, countRows());

    // flush is async.
    admin.flush(tableName);

    FileSystem fs = FileSystem.get(conf);
    Path hfile = getFlushedHFile(fs, tableName);

    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    admin.disableTable(tableName);

    // Create a new corrupt file called deadbeef (valid hfile name) by truncating
    // the real flushed hfile.
    Path corrupt = new Path(hfile.getParent(), "deadbeef");
    TestHFile.truncateFile(fs, hfile, corrupt);
    LOG.info("Created corrupted file " + corrupt);
    HBaseFsck.debugLsr(conf, FSUtils.getRootDir(conf));

    // We cannot enable here because enable never finishes due to the corrupt region.
    HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, tableName);
    // JUnit assertEquals takes (expected, actual); the original had them reversed,
    // which yields misleading failure messages.
    assertEquals(0, res.getRetCode());
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    assertEquals(5, hfcc.getHFilesChecked());
    assertEquals(1, hfcc.getCorrupted().size());
    assertEquals(0, hfcc.getFailures().size());
    assertEquals(1, hfcc.getQuarantined().size());
    assertEquals(0, hfcc.getMissing().size());

    // It's been fixed, verify that we can enable.
    admin.enableTable(tableName);
  } finally {
    cleanupTable(tableName);
  }
}
Usage example of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker in the Apache HBase project, from class BaseTestHBaseFsck, method doQuarantineTest.
/**
 * Runs hbck with hfile quarantining against {@code table} and verifies the corruption
 * checker's counters.
 * <p>
 * Tests that use this should have a timeout, because this method could potentially wait
 * forever while polling for the table to become enabled.
 *
 * @param table    table to set up, quarantine-check, and clean up
 * @param hbck     the (possibly fault-injected) fsck instance to execute
 * @param check    expected number of hfiles checked
 * @param corrupt  expected number of corrupted hfiles
 * @param fail     expected number of check failures
 * @param quar     expected number of quarantined hfiles
 * @param missing  expected number of missing hfiles
 */
protected void doQuarantineTest(TableName table, HBaseFsck hbck, int check, int corrupt,
    int fail, int quar, int missing) throws Exception {
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());

    // flush is async.
    admin.flush(table);

    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    admin.disableTable(table);

    String[] args = { "-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission",
        table.getNameAsString() };
    HBaseFsck res = hbck.exec(hbfsckExecutorService, args);

    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    // JUnit assertEquals takes (expected, actual); the original had them reversed,
    // which yields misleading failure messages.
    assertEquals(check, hfcc.getHFilesChecked());
    assertEquals(corrupt, hfcc.getCorrupted().size());
    assertEquals(fail, hfcc.getFailures().size());
    assertEquals(quar, hfcc.getQuarantined().size());
    assertEquals(missing, hfcc.getMissing().size());

    // It's been fixed; verify that we can enable by polling until the table comes up.
    admin.enableTableAsync(table);
    while (!admin.isTableEnabled(table)) {
      try {
        Thread.sleep(250);
      } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can see it,
        // then fail the test explicitly (was printStackTrace + fail before).
        Thread.currentThread().interrupt();
        fail("Interrupted when trying to enable table " + table);
      }
    }
  } finally {
    cleanupTable(table);
  }
}
Aggregations