Use of org.apache.hadoop.hbase.UnknownScannerException in project hbase by apache.
From the class TestReplicationKillRS, method loadTableAndKillRS.
/**
 * Load up one table over two region servers and kill a source region server during
 * the upload. The failover happens internally.
 *
 * WARNING this test sometimes fails because of HBASE-3515
 *
 * @throws Exception
 */
public void loadTableAndKillRS(HBaseTestingUtility util) throws Exception {
  // Killing the RS with hbase:meta can result in failed puts until we solve
  // IO fencing
  int rsToKill1 = util.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
  // Takes about 20 secs to run the full loading, kill around the middle
  Thread killer = killARegionServer(util, 5000, rsToKill1);
  LOG.info("Start loading table");
  int initialCount = utility1.loadTable(htable1, famName);
  LOG.info("Done loading table");
  killer.join(5000);
  LOG.info("Done waiting for threads");
  Result[] res;
  while (true) {
    try {
      Scan scan = new Scan();
      ResultScanner scanner = htable1.getScanner(scan);
      res = scanner.next(initialCount);
      scanner.close();
      break;
    } catch (UnknownScannerException ex) {
      LOG.info("Cluster wasn't ready yet, restarting scanner");
    }
  }
  // Some rows may have been lost on the master cluster since we don't have IO fencing.
  if (res.length != initialCount) {
    LOG.warn("We lost some rows on the master cluster!");
    // We don't really expect the other cluster to have more rows
    initialCount = res.length;
  }
  int lastCount = 0;
  final long start = System.currentTimeMillis();
  int i = 0;
  while (true) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for queueFailover replication. " + "Waited "
          + (System.currentTimeMillis() - start) + "ms.");
    }
    Scan scan2 = new Scan();
    ResultScanner scanner2 = htable2.getScanner(scan2);
    Result[] res2 = scanner2.next(initialCount * 2);
    scanner2.close();
    if (res2.length < initialCount) {
      if (lastCount < res2.length) {
        // Don't increment timeout if we make progress
        i--;
      } else {
        i++;
      }
      lastCount = res2.length;
      LOG.info("Only got " + lastCount + " rows instead of " + initialCount + " current i=" + i);
      Thread.sleep(SLEEP_TIME * 2);
    } else {
      break;
    }
  }
}
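
For broader context, UnknownScannerException indicates that the server no longer recognizes a scanner id, typically because the region server hosting the scanner went down (which this test provokes on purpose) or the scanner lease expired; the usual client-side response, as in the first while loop above, is to open a fresh scanner and retry. Below is a minimal, self-contained sketch of that catch-and-reopen pattern. It is illustrative only: the scanWithRetry helper, the test_table name, and the retry limit are assumptions rather than part of the HBase test, and depending on client version the HBase client may already retry some of these failures internally before they reach application code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanRetryExample {

  /**
   * Scans the whole table, reopening the scanner when the cluster no longer
   * knows the scanner id (UnknownScannerException), up to maxRetries attempts.
   */
  static List<Result> scanWithRetry(Table table, int maxRetries) throws IOException {
    for (int attempt = 0; ; attempt++) {
      List<Result> rows = new ArrayList<>();
      try (ResultScanner scanner = table.getScanner(new Scan())) {
        Result r;
        while ((r = scanner.next()) != null) {
          rows.add(r);
        }
        return rows;
      } catch (UnknownScannerException e) {
        // The scanner id is no longer known to the cluster, e.g. because the
        // hosting region server was killed or the scanner lease expired.
        if (attempt >= maxRetries) {
          throw e;
        }
        // Fall through and open a fresh scanner on the next iteration.
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("test_table"))) { // hypothetical table name
      System.out.println("Rows read: " + scanWithRetry(table, 3).size());
    }
  }
}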