Example use of org.apache.hadoop.hbase.client.ScannerTimeoutException in the project cdap by caskdata.
The method testScannerCache of the class HBaseTableTest.
@Test
public void testScannerCache() throws Exception {
  String tableName = "scanCache";
  // Note: HBase appears to enforce the scanner timeout only once 10 seconds
  // have passed; configuring it as 3 seconds does not make the scan fail
  // after 3 seconds. We therefore have to exceed the 10-second mark:
  // 1200 rows at a 10ms sleep each.
  int rowCount = 1200;
  DatasetAdmin tableAdmin = getTableAdmin(CONTEXT1, tableName);
  tableAdmin.create();
  try {
    // populate the table inside a transaction and commit it
    Transaction tx = txClient.startShort();
    Table table = getTable(CONTEXT1, tableName);
    TransactionAware txTable = (TransactionAware) table;
    txTable.startTx(tx);
    int i = 0;
    while (i < rowCount) {
      table.put(new Put("" + i, "x", "y"));
      i++;
    }
    txClient.canCommitOrThrow(tx, txTable.getTxChanges());
    Assert.assertTrue(txTable.commitTx());
    txClient.commitOrThrow(tx);
    try {
      // no scanner cache configured anywhere: the full scan must time out
      testScannerCache(rowCount, tableName, null, null, null);
      Assert.fail("this should have failed with ScannerTimeoutException");
    } catch (Exception e) {
      // we expect a RuntimeException wrapping an HBase ScannerTimeoutException;
      // anything else is a genuine failure and is rethrown
      if (!(e.getCause() instanceof ScannerTimeoutException)) {
        throw e;
      }
    }
    // cache=100 as dataset property
    testScannerCache(rowCount, tableName, "100", null, null);
    // cache=100 as dataset runtime argument
    testScannerCache(rowCount, tableName, "1000", "100", null);
    // cache=100 as scan property
    testScannerCache(rowCount, tableName, "5000", "1000", "100");
  } finally {
    tableAdmin.drop();
  }
}
Aggregations