Search in sources :

Example 6 with UnknownScannerException

Use of org.apache.hadoop.hbase.UnknownScannerException in the project hbase by apache.

From the class TestReplicationKillRS, method loadTableAndKillRS.

/**
 * Loads one table over two region servers and kills a source region server
 * mid-upload; the replication failover is expected to happen internally.
 * Afterwards, re-reads the master table (retrying while the cluster
 * recovers) and waits until the slave cluster has replicated all rows.
 *
 * <p>WARNING: this test sometimes fails because of HBASE-3515.
 *
 * @param util the testing utility for the cluster whose region server is killed
 * @throws Exception if loading, scanning, or waiting is interrupted or fails
 */
public void loadTableAndKillRS(HBaseTestingUtility util) throws Exception {
    // Killing the RS hosting hbase:meta can result in failed puts until we
    // solve IO fencing, so pick the other server.
    int rsToKill1 = util.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
    // Takes about 20 secs to run the full loading, kill around the middle.
    Thread killer = killARegionServer(util, 5000, rsToKill1);
    LOG.info("Start loading table");
    int initialCount = utility1.loadTable(htable1, famName);
    LOG.info("Done loading table");
    killer.join(5000);
    LOG.info("Done waiting for threads");
    Result[] res;
    while (true) {
        // try-with-resources closes the scanner even when next() throws
        // UnknownScannerException; the original leaked one scanner per retry.
        try (ResultScanner scanner = htable1.getScanner(new Scan())) {
            res = scanner.next(initialCount);
            break;
        } catch (UnknownScannerException ex) {
            LOG.info("Cluster wasn't ready yet, restarting scanner");
        }
    }
    // Without IO fencing, killing the RS may have lost some puts on the
    // master cluster; shrink the expected count accordingly.
    if (res.length != initialCount) {
        LOG.warn("We lost some rows on the master cluster!");
        // We don't really expect the other cluster to have more rows
        initialCount = res.length;
    }
    int lastCount = 0;
    final long start = System.currentTimeMillis();
    int i = 0;
    while (true) {
        if (i == NB_RETRIES - 1) {
            fail("Waited too much time for queueFailover replication. " + "Waited "
                + (System.currentTimeMillis() - start) + "ms.");
        }
        Result[] res2;
        // Close the slave-cluster scanner even if next() throws mid-scan.
        try (ResultScanner scanner2 = htable2.getScanner(new Scan())) {
            res2 = scanner2.next(initialCount * 2);
        }
        if (res2.length < initialCount) {
            if (lastCount < res2.length) {
                // Don't increment timeout if we make progress
                i--;
            } else {
                i++;
            }
            lastCount = res2.length;
            LOG.info("Only got " + lastCount + " rows instead of " + initialCount
                + " current i=" + i);
            Thread.sleep(SLEEP_TIME * 2);
        } else {
            break;
        }
    }
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scan(org.apache.hadoop.hbase.client.Scan) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) Result(org.apache.hadoop.hbase.client.Result)

Aggregations

UnknownScannerException (org.apache.hadoop.hbase.UnknownScannerException)6 IOException (java.io.IOException)4 InterruptedIOException (java.io.InterruptedIOException)3 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)3 HBaseIOException (org.apache.hadoop.hbase.HBaseIOException)3 NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException)3 Result (org.apache.hadoop.hbase.client.Result)3 UnknownHostException (java.net.UnknownHostException)2 ArrayList (java.util.ArrayList)2 Cell (org.apache.hadoop.hbase.Cell)2 Scan (org.apache.hadoop.hbase.client.Scan)2 ScannerResetException (org.apache.hadoop.hbase.exceptions.ScannerResetException)2 ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString)2 ScanResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse)2 FileNotFoundException (java.io.FileNotFoundException)1 BindException (java.net.BindException)1 MutableObject (org.apache.commons.lang.mutable.MutableObject)1 DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException)1 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)1 HRegionLocation (org.apache.hadoop.hbase.HRegionLocation)1