Use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.
In class LoadIncrementalHFiles, method bulkLoadPhase.
/**
 * This takes the LQIs grouped by likely regions and attempts to bulk load them. Any failures
 * are re-queued for another pass with the groupOrSplitPhase.
 */
protected void bulkLoadPhase(final Table table, final Connection conn, ExecutorService pool,
    Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups,
    boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
  // atomically bulk load the groups.
  Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<>();
  for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
    final byte[] first = e.getKey().array();
    final Collection<LoadQueueItem> lqis = e.getValue();
    final ClientServiceCallable<byte[]> serviceCallable =
        buildClientServiceCallable(conn, table.getName(), first, lqis, copyFile);
    final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
      @Override
      public List<LoadQueueItem> call() throws Exception {
        List<LoadQueueItem> toRetry =
            tryAtomicRegionLoad(serviceCallable, table.getName(), first, lqis);
        return toRetry;
      }
    };
    if (item2RegionMap != null) {
      for (LoadQueueItem lqi : lqis) {
        item2RegionMap.put(lqi, e.getKey());
      }
    }
    loadingFutures.add(pool.submit(call));
  }
  // get all the results.
  for (Future<List<LoadQueueItem>> future : loadingFutures) {
    try {
      List<LoadQueueItem> toRetry = future.get();
      if (item2RegionMap != null) {
        for (LoadQueueItem lqi : toRetry) {
          item2RegionMap.remove(lqi);
        }
      }
      // LQIs that are requeued to be regrouped.
      queue.addAll(toRetry);
    } catch (ExecutionException e1) {
      Throwable t = e1.getCause();
      if (t instanceof IOException) {
        // TODO Implement bulk load recovery
        throw new IOException("BulkLoad encountered an unrecoverable problem", t);
      }
      LOG.error("Unexpected execution exception during bulk load", e1);
      throw new IllegalStateException(t);
    } catch (InterruptedException e1) {
      LOG.error("Unexpected interrupted exception during bulk load", e1);
      throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
    }
  }
}
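Stripped of the HBase types, bulkLoadPhase is a fan-out/collect loop: each region group becomes one pooled task, and whatever a task fails to load is drained back into the shared queue for the next groupOrSplitPhase pass. A minimal, self-contained sketch of that pattern (plain JDK only; loadGroup and the String items are hypothetical stand-ins for tryAtomicRegionLoad and the LQIs):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class RetryQueueSketch {

  // Hypothetical stand-in for tryAtomicRegionLoad: loads one group and
  // returns the items that need another pass.
  static List<String> loadGroup(List<String> group) {
    return new ArrayList<>(); // pretend every item loaded on the first try
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    Deque<String> queue = new ArrayDeque<>();
    Map<String, List<String>> regionGroups = new HashMap<>();
    regionGroups.put("regionA", Arrays.asList("hfile1", "hfile2"));
    regionGroups.put("regionB", Arrays.asList("hfile3"));

    // Fan out: one task per region group, submitted to the shared pool.
    List<Future<List<String>>> futures = new ArrayList<>();
    for (List<String> group : regionGroups.values()) {
      futures.add(pool.submit(() -> loadGroup(group)));
    }
    // Collect: drain each task's failures back into the queue so a later
    // pass (the analogue of groupOrSplitPhase) can regroup or split them.
    for (Future<List<String>> f : futures) {
      queue.addAll(f.get());
    }
    pool.shutdown();
    System.out.println("items to retry: " + queue.size());
  }
}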
Use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.
In class TestLoadIncrementalHFilesSplitRecovery, method testBulkLoadPhaseFailure.
/**
 * Test that shows that an exception thrown from the RS side will result in an exception on the
 * LIHFile client.
 */
@Test(expected = IOException.class, timeout = 120000)
public void testBulkLoadPhaseFailure() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  final AtomicInteger attemptedCalls = new AtomicInteger();
  final AtomicInteger failedCalls = new AtomicInteger();
  util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
    setupTable(connection, table, 10);
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
      @Override
      protected List<LoadQueueItem> tryAtomicRegionLoad(
          ClientServiceCallable<byte[]> serviceCallable, TableName tableName, final byte[] first,
          Collection<LoadQueueItem> lqis) throws IOException {
        int i = attemptedCalls.incrementAndGet();
        if (i == 1) {
          Connection errConn;
          try {
            errConn = getMockedConnection(util.getConfiguration());
            serviceCallable = this.buildClientServiceCallable(errConn, table, first, lqis, true);
          } catch (Exception e) {
            LOG.fatal("mocking cruft, should never happen", e);
            throw new RuntimeException("mocking cruft, should never happen");
          }
          failedCalls.incrementAndGet();
          return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
        }
        return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
      }
    };
    try {
      // create HFiles for different column families
      Path dir = buildBulkFiles(table, 1);
      try (Table t = connection.getTable(table);
          RegionLocator locator = connection.getRegionLocator(table);
          Admin admin = connection.getAdmin()) {
        lih.doBulkLoad(dir, admin, t, locator);
      }
    } finally {
      util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
          HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    }
    fail("doBulkLoad should have thrown an exception");
  }
}
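For context, the client entry point this test drives is a single doBulkLoad call. A minimal driver sketch, assuming the org.apache.hadoop.hbase.mapreduce package of this HBase version, plus a hypothetical table myTable whose HFiles sit under /bulk/output (one subdirectory per column family):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("myTable"); // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(tableName);
        RegionLocator locator = conn.getRegionLocator(tableName);
        Admin admin = conn.getAdmin()) {
      // Directory of HFiles produced by e.g. HFileOutputFormat2, one subdir per family.
      new LoadIncrementalHFiles(conf)
          .doBulkLoad(new Path("/bulk/output"), admin, table, locator);
    }
  }
}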
Use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.
In class TestLoadIncrementalHFilesSplitRecovery, method testRetryOnIOException.
/**
 * Test that shows that an exception thrown from the RS side will result in the expected
 * number of retries, set by {@link HConstants#HBASE_CLIENT_RETRIES_NUMBER}, when
 * {@link LoadIncrementalHFiles#RETRY_ON_IO_EXCEPTION} is set.
 */
@Test
public void testRetryOnIOException() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  final AtomicInteger calls = new AtomicInteger(1);
  final Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
  util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, true);
  final LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
    @Override
    protected List<LoadQueueItem> tryAtomicRegionLoad(
        ClientServiceCallable<byte[]> serverCallable, TableName tableName, final byte[] first,
        Collection<LoadQueueItem> lqis) throws IOException {
      if (calls.getAndIncrement() < util.getConfiguration().getInt(
          HConstants.HBASE_CLIENT_RETRIES_NUMBER,
          HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) - 1) {
        ClientServiceCallable<byte[]> newServerCallable = new ClientServiceCallable<byte[]>(conn,
            tableName, first, new RpcControllerFactory(util.getConfiguration()).newController()) {
          @Override
          public byte[] rpcCall() throws Exception {
            throw new IOException("Error calling something on RegionServer");
          }
        };
        return super.tryAtomicRegionLoad(newServerCallable, tableName, first, lqis);
      } else {
        return super.tryAtomicRegionLoad(serverCallable, tableName, first, lqis);
      }
    }
  };
  setupTable(conn, table, 10);
  Path dir = buildBulkFiles(table, 1);
  lih.doBulkLoad(dir, conn.getAdmin(), conn.getTable(table), conn.getRegionLocator(table));
  util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false);
}
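The retry behavior above hinges on two settings: RETRY_ON_IO_EXCEPTION, which appears to default to off (an IOException from the RS then fails the load outright, as in the previous test), and the standard client retries number, which bounds the attempts. A small configuration sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class RetryConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Opt in: retry on IOExceptions from the RegionServer instead of aborting the load.
    conf.setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, true);
    // The attempt count is bounded by the standard client retries setting.
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
    System.out.println("retry on IOE: "
        + conf.getBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false));
  }
}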
Use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.
In class LoadIncrementalHFiles, method buildClientServiceCallable.
protected ClientServiceCallable<byte[]> buildClientServiceCallable(final Connection conn,
    TableName tableName, byte[] first, Collection<LoadQueueItem> lqis, boolean copyFile) {
  final List<Pair<byte[], String>> famPaths = new ArrayList<>(lqis.size());
  for (LoadQueueItem lqi : lqis) {
    famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
  }
  return new ClientServiceCallable<byte[]>(conn, tableName, first,
      rpcControllerFactory.newController()) {
    @Override
    protected byte[] rpcCall() throws Exception {
      SecureBulkLoadClient secureClient = null;
      boolean success = false;
      try {
        LOG.debug("Going to connect to server " + getLocation() + " for row "
            + Bytes.toStringBinary(getRow()) + " with hfile group "
            + LoadIncrementalHFiles.this.toString(famPaths));
        byte[] regionName = getLocation().getRegionInfo().getRegionName();
        try (Table table = conn.getTable(getTableName())) {
          secureClient = new SecureBulkLoadClient(getConf(), table);
          success = secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName,
              assignSeqIds, fsDelegationToken.getUserToken(), bulkToken, copyFile);
        }
        return success ? regionName : null;
      } finally {
        // On failure, try to move the hfiles back to their original location
        // in the user directory.
        if (secureClient != null && !success) {
          FileSystem targetFs = FileSystem.get(getConf());
          // fs is the source filesystem
          if (fs == null) {
            fs = lqis.iterator().next().hfilePath.getFileSystem(getConf());
          }
          // Only attempt the move when source and target are the same HDFS,
          // because previously we moved the files to the staging directory there.
          if (FSHDFSUtils.isSameHdfs(getConf(), fs, targetFs)) {
            for (Pair<byte[], String> el : famPaths) {
              Path hfileStagingPath = null;
              Path hfileOrigPath = new Path(el.getSecond());
              try {
                hfileStagingPath = new Path(new Path(bulkToken, Bytes.toString(el.getFirst())),
                    hfileOrigPath.getName());
                if (targetFs.rename(hfileStagingPath, hfileOrigPath)) {
                  LOG.debug("Moved back file " + hfileOrigPath + " from " + hfileStagingPath);
                } else if (targetFs.exists(hfileStagingPath)) {
                  LOG.debug("Unable to move back file " + hfileOrigPath + " from "
                      + hfileStagingPath);
                }
              } catch (Exception ex) {
                LOG.debug("Unable to move back file " + hfileOrigPath + " from "
                    + hfileStagingPath, ex);
              }
            }
          }
        }
      }
    }
  };
}
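The same shape works for ad-hoc region RPCs: subclass ClientServiceCallable, put the call in rpcCall(), and run it through the client's retrying caller, which is what tryAtomicRegionLoad does with the callable built above. A hedged sketch, assuming the RpcRetryingCallerFactory API of this HBase version and a hypothetical table someTable:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CallableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("someTable"); // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // The callable locates the region holding the given row and exposes
      // that region's ClientService stub to rpcCall().
      ClientServiceCallable<byte[]> callable = new ClientServiceCallable<byte[]>(conn, tableName,
          HConstants.EMPTY_START_ROW, new RpcControllerFactory(conf).newController()) {
        @Override
        protected byte[] rpcCall() throws Exception {
          // Anything that talks to the located region goes here; as a trivial
          // example, just report which region we landed on.
          return getLocation().getRegionInfo().getRegionName();
        }
      };
      // Execute with the client's retry/backoff semantics, as tryAtomicRegionLoad does.
      byte[] regionName = new RpcRetryingCallerFactory(conf).<byte[]>newCaller()
          .callWithRetries(callable, 60000);
      System.out.println("located region: " + Bytes.toStringBinary(regionName));
    }
  }
}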