Use of org.apache.hadoop.hbase.util.HBaseFsck in project hbase by apache.
Class IntegrationTestDDLMasterFailover, method runTest:
private int runTest() throws Exception {
  LOG.info("Starting the test");
  String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
  long runtime = util.getConfiguration().getLong(runtimeKey, DEFAULT_RUN_TIME);
  String numThreadKey = String.format(NUM_THREADS_KEY, this.getClass().getSimpleName());
  numThreads = util.getConfiguration().getInt(numThreadKey, DEFAULT_NUM_THREADS);
  ArrayList<Worker> workers = new ArrayList<>(numThreads);
  for (int i = 0; i < numThreads; i++) {
    checkException(workers);
    Worker worker = new Worker();
    LOG.info("Launching worker thread " + worker.getName());
    workers.add(worker);
    worker.start();
  }
  Threads.sleep(runtime / 2);
  LOG.info("Stopping creating new tables");
  create_table.set(false);
  Threads.sleep(runtime / 2);
  LOG.info("Runtime is up");
  running.set(false);
  checkException(workers);
  for (Worker worker : workers) {
    worker.join();
  }
  LOG.info("All Worker threads stopped");
  // verify
  LOG.info("Verify actions of all threads succeeded");
  checkException(workers);
  LOG.info("Verify namespaces");
  verifyNamespaces();
  LOG.info("Verify states of all tables");
  verifyTables();
  // RUN HBCK
  HBaseFsck hbck = null;
  try {
    LOG.info("Running hbck");
    hbck = HbckTestingUtil.doFsck(util.getConfiguration(), false);
    if (HbckTestingUtil.inconsistencyFound(hbck)) {
      // Found an inconsistency during HBCK. Leave tables and namespaces undropped so that
      // they can be inspected outside the test.
      keepObjectsAtTheEnd = true;
    }
    HbckTestingUtil.assertNoErrors(hbck);
    LOG.info("Finished hbck");
  } finally {
    if (hbck != null) {
      hbck.close();
    }
  }
  return 0;
}
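The checkException helper is called repeatedly in runTest but its body is not shown in this snippet. A minimal sketch of what such a fail-fast check typically looks like, assuming each Worker exposes a getStoredException() accessor (a hypothetical name) returning the first exception the thread hit, or null if it is still healthy:

private void checkException(List<Worker> workers) {
  for (Worker worker : workers) {
    Exception e = worker.getStoredException();  // hypothetical accessor, see note above
    if (e != null) {
      // Fail the whole run as soon as any worker thread has reported a problem.
      throw new RuntimeException("Found exception in thread: " + worker.getName(), e);
    }
  }
}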
Use of org.apache.hadoop.hbase.util.HBaseFsck in project hbase by apache.
Class TestMetaWithReplicas, method testHBaseFsckWithFewerMetaReplicaZnodes:
@Test
public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception {
  ClusterConnection c =
      (ClusterConnection) ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
  RegionLocations rl =
      c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, false, false);
  HBaseFsckRepair.closeRegionSilentlyAndWait(c, rl.getRegionLocation(2).getServerName(),
      rl.getRegionLocation(2).getRegionInfo());
  ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  ZKUtil.deleteNode(zkw, zkw.znodePaths.getZNodeForReplica(2));
  // check that problem exists
  HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION });
  // fix the problem
  hbck = doFsck(TEST_UTIL.getConfiguration(), true);
  // run hbck again to make sure we don't see any errors
  hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[] {});
}
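The doFsck, assertErrors, and assertNoErrors calls above are static helpers from HbckTestingUtil (the full doFsck appears at the end of this page). A plausible sketch of the two assertion helpers, assuming HBaseFsck exposes its findings through getErrors().getErrorList(); the order-insensitive comparison is a choice made for the sketch, not necessarily the real implementation:

public static void assertNoErrors(HBaseFsck fsck) throws Exception {
  assertErrors(fsck, new ERROR_CODE[] {});
}

public static void assertErrors(HBaseFsck fsck, ERROR_CODE[] expectedErrors) {
  // Compare the error codes hbck reported against the expected set.
  List<ERROR_CODE> actual = new ArrayList<>(fsck.getErrors().getErrorList());
  List<ERROR_CODE> expected = new ArrayList<>(Arrays.asList(expectedErrors));
  Collections.sort(actual);
  Collections.sort(expected);
  assertEquals(expected, actual);
}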
Use of org.apache.hadoop.hbase.util.HBaseFsck in project hbase by apache.
Class TestMetaWithReplicas, method testHBaseFsckWithMetaReplicas:
@Test
public void testHBaseFsckWithMetaReplicas() throws Exception {
  HBaseFsck hbck = HbckTestingUtil.doFsck(TEST_UTIL.getConfiguration(), false);
  HbckTestingUtil.assertNoErrors(hbck);
}
Use of org.apache.hadoop.hbase.util.HBaseFsck in project hbase by apache.
Class TestMetaWithReplicas, method stopMasterAndValidateReplicaCount:
private void stopMasterAndValidateReplicaCount(final int originalReplicaCount,
    final int newReplicaCount) throws Exception {
  ServerName sn = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
  TEST_UTIL.getHBaseClusterInterface().stopMaster(sn);
  TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(sn, 60000);
  List<String> metaZnodes = TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes();
  // we should have what was configured before
  assert (metaZnodes.size() == originalReplicaCount);
  TEST_UTIL.getHBaseClusterInterface().getConf().setInt(HConstants.META_REPLICAS_NUM,
      newReplicaCount);
  TEST_UTIL.getHBaseClusterInterface().startMaster(sn.getHostname(), 0);
  TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
  TEST_UTIL.waitFor(10000, predicateMetaHasReplicas(newReplicaCount));
  // also check if hbck returns without errors
  TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, newReplicaCount);
  HBaseFsck hbck = HbckTestingUtil.doFsck(TEST_UTIL.getConfiguration(), false);
  HbckTestingUtil.assertNoErrors(hbck);
}
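For context, a hypothetical caller of this helper might cycle the meta replica count up and back down, letting the method validate the znode count and the hbck result after each master restart. The test name and replica counts below are assumptions for illustration, not taken from the source:

@Test
public void testChangingReplicaCount() throws Exception {  // hypothetical test
  // Assumes the cluster was started with 3 hbase:meta replicas configured.
  stopMasterAndValidateReplicaCount(3, 4);  // grow to 4 replicas
  stopMasterAndValidateReplicaCount(4, 3);  // shrink back to 3
}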
Use of org.apache.hadoop.hbase.util.HBaseFsck in project hbase by apache.
Class HbckTestingUtil, method doFsck:
public static HBaseFsck doFsck(Configuration conf, boolean fixAssignments, boolean fixMeta,
    boolean fixHdfsHoles, boolean fixHdfsOverlaps, boolean fixHdfsOrphans,
    boolean fixTableOrphans, boolean fixVersionFile, boolean fixReferenceFiles,
    boolean fixHFileLinks, boolean fixEmptyMetaRegionInfo, boolean fixTableLocks,
    Boolean fixReplication, TableName table) throws Exception {
  HBaseFsck fsck = new HBaseFsck(conf, exec);
  try {
    // i.e. -details
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setFixAssignments(fixAssignments);
    fsck.setFixMeta(fixMeta);
    fsck.setFixHdfsHoles(fixHdfsHoles);
    fsck.setFixHdfsOverlaps(fixHdfsOverlaps);
    fsck.setFixHdfsOrphans(fixHdfsOrphans);
    fsck.setFixTableOrphans(fixTableOrphans);
    fsck.setFixVersionFile(fixVersionFile);
    fsck.setFixReferenceFiles(fixReferenceFiles);
    fsck.setFixHFileLinks(fixHFileLinks);
    fsck.setFixEmptyMetaCells(fixEmptyMetaRegionInfo);
    fsck.setFixReplication(fixReplication);
    if (table != null) {
      fsck.includeTable(table);
    }
    // Parse command line flags before connecting, to grab the lock.
    fsck.connect();
    fsck.onlineHbck();
  } finally {
    fsck.close();
  }
  return fsck;
}
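The earlier tests call a short two-argument doFsck(conf, fix). A plausible sketch of how such convenience overloads could delegate to the full method above, applying the single fix flag to every boolean fix parameter and leaving fixReplication null so replication fixing stays off unless requested explicitly (the exact overloads in HbckTestingUtil may differ):

public static HBaseFsck doFsck(Configuration conf, boolean fix) throws Exception {
  return doFsck(conf, fix, null);
}

public static HBaseFsck doFsck(Configuration conf, boolean fix, TableName table)
    throws Exception {
  // Same fix flag for every repair category; null leaves replication fixing disabled.
  return doFsck(conf, fix, fix, fix, fix, fix, fix,
      fix, fix, fix, fix, fix, null, table);
}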