Use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
In class TestBulkLoadHFilesSplitRecovery, method testRetryOnIOException.
/**
 * Test that shows that an exception thrown from the RS side will result in the expected number of
 * retries set by {@link HConstants#HBASE_CLIENT_RETRIES_NUMBER} when
 * {@link BulkLoadHFiles#RETRY_ON_IO_EXCEPTION} is set.
 */
@Test
public void testRetryOnIOException() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  AtomicInteger calls = new AtomicInteger(0);
  setupTable(util.getConnection(), table, 10);
  Configuration conf = new Configuration(util.getConfiguration());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  conf.setBoolean(BulkLoadHFiles.RETRY_ON_IO_EXCEPTION, true);
  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf) {
    @Override
    protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
        Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
        boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
      // Inject an error-throwing connection while we are still below the configured retry
      // count, then let the final attempt run against the real connection.
      if (calls.get() < conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
          HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)) {
        calls.incrementAndGet();
        super.bulkLoadPhase(mockAndInjectError(conn), tableName, queue, regionGroups, copyFiles,
          item2RegionMap);
      } else {
        super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
      }
    }
  };
  Path dir = buildBulkFiles(table, 1);
  loader.bulkLoad(table, dir);
  // Two failed attempts followed by the successful one: the hook should have injected errors
  // exactly twice.
  assertEquals(2, calls.get());
}
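mockAndInjectError is a helper defined elsewhere in the test class and is not shown in the snippet. The building block such error injection typically relies on is a CompletableFuture that has already completed exceptionally, so a caller awaiting the bulk load result observes the server-side failure. A minimal, hypothetical sketch of that building block using only the JDK (names here are illustrative, not from the HBase test):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

// Hypothetical sketch: a future that is already failed, mimicking a bulk load
// RPC that threw on the region server side.
public final class FailedFutureSketch {

  static <T> CompletableFuture<T> failedFuture(Throwable error) {
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(error);
    return future;
  }

  public static void main(String[] args) {
    CompletableFuture<Boolean> rpc = failedFuture(new IOException("injected bulk load error"));
    try {
      // Awaiting the result rethrows the injected error wrapped in a CompletionException,
      // which is how a client waiting on the RPC observes the remote failure.
      rpc.join();
    } catch (CompletionException e) {
      System.out.println("bulk load failed: " + e.getCause());
    }
  }
}

On Java 9 and later, CompletableFuture.failedFuture(Throwable) provides the same helper out of the box.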
Use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
In class TestBulkLoadHFilesSplitRecovery, method testGroupOrSplitFailure.
/**
 * This simulates a remote exception which should cause LIHF to exit with an exception.
 */
@Test(expected = IOException.class)
public void testGroupOrSplitFailure() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  setupTable(util.getConnection(), tableName, 10);
  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
    private int i = 0;

    @Override
    protected Pair<List<LoadQueueItem>, String> groupOrSplit(AsyncClusterConnection conn,
        TableName tableName, Multimap<ByteBuffer, LoadQueueItem> regionGroups, LoadQueueItem item,
        List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
      i++;
      // Fail the fifth groupOrSplit invocation; the exception should propagate out of bulkLoad.
      if (i == 5) {
        throw new IOException("failure");
      }
      return super.groupOrSplit(conn, tableName, regionGroups, item, startEndKeys);
    }
  };
  // create HFiles for different column families
  Path dir = buildBulkFiles(tableName, 1);
  loader.bulkLoad(tableName, dir);
}
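For reference, the same expectation can also be expressed with an explicit assertion instead of the @Test(expected = ...) attribute. A self-contained, hypothetical illustration of that pattern with JUnit 4.13+ (not how the HBase test itself is written):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

import java.io.IOException;

import org.junit.Test;

// Hypothetical illustration only: assertThrows makes the failing call explicit and
// hands back the thrown exception for further inspection.
public class ExpectedExceptionSketch {

  private void failingBulkLoad() throws IOException {
    // Stand-in for loader.bulkLoad(tableName, dir) failing on the injected groupOrSplit error.
    throw new IOException("failure");
  }

  @Test
  public void failurePropagates() {
    IOException thrown = assertThrows(IOException.class, () -> failingBulkLoad());
    // The propagated message comes straight from the injected failure.
    assertEquals("failure", thrown.getMessage());
  }
}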
Use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
In class TestBulkLoadHFilesSplitRecovery, method testGroupOrSplitPresplit.
/**
 * This test splits a table and attempts to bulk load. The bulk import files should be split
 * before atomically importing.
 */
@Test
public void testGroupOrSplitPresplit() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  setupTable(util.getConnection(), table, 10);
  populateTable(util.getConnection(), table, 1);
  assertExpectedTable(util.getConnection(), table, ROWCOUNT, 1);
  forceSplit(table);
  final AtomicInteger countedLqis = new AtomicInteger();
  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
    @Override
    protected Pair<List<LoadQueueItem>, String> groupOrSplit(AsyncClusterConnection conn,
        TableName tableName, Multimap<ByteBuffer, LoadQueueItem> regionGroups, LoadQueueItem item,
        List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
      Pair<List<LoadQueueItem>, String> lqis =
        super.groupOrSplit(conn, tableName, regionGroups, item, startEndKeys);
      if (lqis != null && lqis.getFirst() != null) {
        countedLqis.addAndGet(lqis.getFirst().size());
      }
      return lqis;
    }
  };
  // create HFiles for different column families
  Path dir = buildBulkFiles(table, 2);
  loader.bulkLoad(table, dir);
  assertExpectedTable(util.getConnection(), table, ROWCOUNT, 2);
  assertEquals(20, countedLqis.get());
}
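The final assertion of 20 counted items presumably follows from the setup: the table is created with 10 column families, forceSplit leaves it with two regions, and groupOrSplit therefore splits each family's HFile into two LoadQueueItems. A small, hypothetical helper for double-checking the post-split region count through the Admin API (not part of the test above; the class and method names are illustrative):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;

// Hypothetical helper: report how many regions a table currently has, e.g. to
// confirm that a forced split produced the expected two regions.
public final class RegionCountSketch {

  static int regionCount(Connection connection, TableName table) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(table);
      return regions.size();
    }
  }
}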
Use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
In class TestBulkLoadHFiles, method testBulkLoadByFamily.
@Test
public void testBulkLoadByFamily() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testBulkLoadByFamily");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  String tableName = tn.getMethodName();
  String[] families = { "cf1", "cf2", "cf3" };
  for (int i = 0; i < families.length; i++) {
    byte[] from = Bytes.toBytes(i + "begin");
    byte[] to = Bytes.toBytes(i + "end");
    Path familyDir = new Path(dir, families[i]);
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile"),
      Bytes.toBytes(families[i]), QUALIFIER, from, to, 1000);
  }
  Table table = util.createTable(TableName.valueOf(tableName), families);
  final AtomicInteger attemptedCalls = new AtomicInteger();
  util.getConfiguration().setBoolean(BulkLoadHFilesTool.BULK_LOAD_HFILES_BY_FAMILY, true);
  BulkLoadHFiles loader = new BulkLoadHFilesTool(util.getConfiguration()) {
    @Override
    protected CompletableFuture<Collection<LoadQueueItem>> tryAtomicRegionLoad(
        final AsyncClusterConnection conn, final TableName tableName, boolean copyFiles,
        final byte[] first, Collection<LoadQueueItem> lqis) {
      // Count one load attempt per call; with by-family loading there should be one per family.
      attemptedCalls.incrementAndGet();
      return super.tryAtomicRegionLoad(conn, tableName, copyFiles, first, lqis);
    }
  };
  try {
    loader.bulkLoad(table.getName(), dir);
    assertEquals(families.length, attemptedCalls.get());
    assertEquals(1000 * families.length, HBaseTestingUtil.countRows(table));
  } finally {
    if (null != table) {
      table.close();
    }
    util.getConfiguration().setBoolean(BulkLoadHFilesTool.BULK_LOAD_HFILES_BY_FAMILY, false);
  }
}
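Outside of tests, the same by-family behaviour can be driven through the public BulkLoadHFiles API. A minimal sketch, assuming HFiles are staged under <dir>/<family>/<hfile> as in the test above (the table name and staging path below are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;

// Minimal sketch of a by-family bulk load through the public API.
public class BulkLoadByFamilyExample {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same switch the test flips: group and load HFiles one column family at a time.
    conf.setBoolean(BulkLoadHFilesTool.BULK_LOAD_HFILES_BY_FAMILY, true);
    BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
    // Placeholder table name and staging directory.
    loader.bulkLoad(TableName.valueOf("my_table"), new Path("/staging/bulkload"));
  }
}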