Use of java.util.concurrent.ExecutionException in project hbase by apache.
The class TestAsyncNonMetaRegionLocator, method testLocateAfter.
// Usually locating "after" a row returns the same region as locating the row
// itself, so this test splits the table to make the two lookups return
// different regions.
@Test
public void testLocateAfter() throws IOException, InterruptedException, ExecutionException {
  byte[] row = Bytes.toBytes("1");
  byte[] splitKey = Arrays.copyOf(row, 2);
  TEST_UTIL.createTable(TABLE_NAME, FAMILY, new byte[][] { splitKey });
  TEST_UTIL.waitTableAvailable(TABLE_NAME);
  HRegionLocation currentLoc =
      LOCATOR.getRegionLocation(TABLE_NAME, row, RegionLocateType.CURRENT).get();
  ServerName currentServerName =
      TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName();
  assertLocEquals(EMPTY_START_ROW, splitKey, currentServerName, currentLoc);
  HRegionLocation afterLoc =
      LOCATOR.getRegionLocation(TABLE_NAME, row, RegionLocateType.AFTER).get();
  ServerName afterServerName = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
      .map(t -> t.getRegionServer())
      .filter(rs -> rs.getOnlineRegions(TABLE_NAME).stream()
          .anyMatch(r -> Bytes.equals(splitKey, r.getRegionInfo().getStartKey())))
      .findAny().get().getServerName();
  assertLocEquals(splitKey, EMPTY_END_ROW, afterServerName, afterLoc);
  assertSame(afterLoc, LOCATOR.getRegionLocation(TABLE_NAME, row, RegionLocateType.AFTER).get());
}
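The test blocks on the locator's futures with get(), which is why it declares ExecutionException. A minimal, self-contained sketch of that pattern (not HBase code; the supplier below is a stand-in for LOCATOR.getRegionLocation(...)):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class AsyncGetSketch {
  public static void main(String[] args) throws InterruptedException {
    // Stand-in for an async region lookup; any failure inside the supplier
    // surfaces as an ExecutionException when we block on get().
    CompletableFuture<String> location = CompletableFuture.supplyAsync(() -> "region@server1");
    try {
      System.out.println(location.get());
    } catch (ExecutionException e) {
      // The real error is the cause, not the ExecutionException itself.
      System.err.println("lookup failed: " + e.getCause());
    }
  }
}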
Use of java.util.concurrent.ExecutionException in project hbase by apache.
The class RegionLocationFinder, method getBlockDistribution.
public HDFSBlocksDistribution getBlockDistribution(HRegionInfo hri) {
  HDFSBlocksDistribution blockDistbn = null;
  try {
    if (cache.asMap().containsKey(hri)) {
      blockDistbn = cache.get(hri);
      return blockDistbn;
    } else {
      LOG.debug("HDFSBlocksDistribution not found in cache for region "
          + hri.getRegionNameAsString());
      blockDistbn = internalGetTopBlockLocation(hri);
      cache.put(hri, blockDistbn);
      return blockDistbn;
    }
  } catch (ExecutionException e) {
    LOG.warn("Error while fetching cache entry ", e);
    blockDistbn = internalGetTopBlockLocation(hri);
    cache.put(hri, blockDistbn);
    return blockDistbn;
  }
}
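Here cache.get(hri) can throw ExecutionException because the entry is computed by a loader on a miss (Guava's LoadingCache behaves this way). A minimal, self-contained sketch of the same compute-through-cache-with-fallback pattern, using String keys and a hypothetical expensiveComputation helper standing in for the HDFS block-location lookup:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.ExecutionException;

public class CacheFallbackSketch {
  private final LoadingCache<String, Long> cache = CacheBuilder.newBuilder()
      .maximumSize(1000)
      .build(new CacheLoader<String, Long>() {
        @Override
        public Long load(String key) {
          return expensiveComputation(key);
        }
      });

  long get(String key) {
    try {
      return cache.get(key); // computes via the loader on a miss
    } catch (ExecutionException e) {
      // The loader failed inside the cache; fall back to computing directly,
      // as getBlockDistribution does above.
      return expensiveComputation(key);
    }
  }

  private long expensiveComputation(String key) {
    return key.length(); // stand-in for scanning HDFS block locations
  }
}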
Use of java.util.concurrent.ExecutionException in project hbase by apache.
The class RpcRetryingCallerWithReadReplicas, method call.
/**
 * <p>
 * Algorithm:
 * - we put the query into the execution pool.
 * - after x ms, if we don't have a result, we add the queries for the secondary replicas
 * - we take the first answer
 * - when done, we cancel what's left. Cancelling means:
 *   - removing from the pool if the actual call was not started
 *   - interrupting the call if it has started
 * Client side, we need to take into account that
 * - a call is not executed immediately after being put into the pool
 * - a call is a thread. Let's not multiply the number of threads by the number of replicas.
 * Server side, if we can cancel when it's still in the handler pool, it's much better, as a call
 * can take some i/o.
 * </p>
 * Globally, the number of retries, timeouts and so on still apply, but per replica,
 * not globally. We continue until all retries are done, or all timeouts are exceeded.
 */
public Result call(int operationTimeout)
    throws DoNotRetryIOException, InterruptedIOException, RetriesExhaustedException {
  boolean isTargetReplicaSpecified = (get.getReplicaId() >= 0);
  RegionLocations rl = getRegionLocations(true,
      (isTargetReplicaSpecified ? get.getReplicaId() : RegionReplicaUtil.DEFAULT_REPLICA_ID),
      cConnection, tableName, get.getRow());
  final ResultBoundedCompletionService<Result> cs =
      new ResultBoundedCompletionService<>(this.rpcRetryingCallerFactory, pool, rl.size());
  int startIndex = 0;
  int endIndex = rl.size();
  if (isTargetReplicaSpecified) {
    addCallsForReplica(cs, rl, get.getReplicaId(), get.getReplicaId());
    endIndex = 1;
  } else {
    addCallsForReplica(cs, rl, 0, 0);
    try {
      // wait for the timeout to see whether the primary responds back
      // Yes, microseconds
      Future<Result> f = cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS);
      if (f != null) {
        // great, we got a response
        return f.get();
      }
    } catch (ExecutionException e) {
      // We ignore the ExecutionException and continue with the secondary replicas
      if (LOG.isDebugEnabled()) {
        LOG.debug("Primary replica returns " + e.getCause());
      }
      // Skip the result from the primary as we know that there is something wrong
      startIndex = 1;
    } catch (CancellationException e) {
      throw new InterruptedIOException();
    } catch (InterruptedException e) {
      throw new InterruptedIOException();
    }
    // submit calls for all of the secondaries at once
    addCallsForReplica(cs, rl, 1, rl.size() - 1);
  }
  try {
    Future<Result> f = cs.pollForFirstSuccessfullyCompletedTask(operationTimeout,
        TimeUnit.MILLISECONDS, startIndex, endIndex);
    if (f == null) {
      throw new RetriesExhaustedException("timed out after " + operationTimeout + " ms");
    }
    return f.get();
  } catch (ExecutionException e) {
    throwEnrichedException(e, retries);
  } catch (CancellationException e) {
    throw new InterruptedIOException();
  } catch (InterruptedException e) {
    throw new InterruptedIOException();
  } finally {
    // We get here because we were interrupted or because one or more of the
    // calls succeeded or failed. In all cases, we stop all our tasks.
    cs.cancelAll();
  }
  LOG.error("Impossible? Arrived at an unreachable line..."); // unreachable
  return null;
}
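The method above implements a hedged read: ask the primary first, and only fan out to the secondaries if it does not answer within timeBeforeReplicas. A minimal, self-contained sketch of that shape using a plain ExecutorCompletionService (it omits the per-replica retries, the error-skipping startIndex logic, and the bounded completion service of the real code):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class HedgedReadSketch {
  static <T> T hedgedCall(ExecutorService pool, List<Callable<T>> replicas,
      long primaryTimeoutMs, long operationTimeoutMs)
      throws InterruptedException, ExecutionException, TimeoutException {
    CompletionService<T> cs = new ExecutorCompletionService<>(pool);
    List<Future<T>> futures = new ArrayList<>();
    // 1. put the primary query into the execution pool
    futures.add(cs.submit(replicas.get(0)));
    try {
      // 2. after the primary timeout, if we don't have a result, hedge
      Future<T> f = cs.poll(primaryTimeoutMs, TimeUnit.MILLISECONDS);
      if (f != null) {
        return f.get(); // primary answered in time
      }
      for (Callable<T> secondary : replicas.subList(1, replicas.size())) {
        futures.add(cs.submit(secondary));
      }
      // 3. take the first answer from whichever replica finishes first
      f = cs.poll(operationTimeoutMs, TimeUnit.MILLISECONDS);
      if (f == null) {
        throw new TimeoutException("no replica answered in " + operationTimeoutMs + " ms");
      }
      return f.get();
    } finally {
      // 4. cancel what's left: tasks that haven't started won't run,
      //    running ones are interrupted
      for (Future<T> future : futures) {
        future.cancel(true);
      }
    }
  }
}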
Use of java.util.concurrent.ExecutionException in project hbase by apache.
The class LoadIncrementalHFiles, method bulkLoadPhase.
/**
 * Takes the LQIs (LoadQueueItems) grouped by likely regions and attempts to bulk
 * load them. Any failures are re-queued for another pass through the
 * groupOrSplitPhase.
 */
protected void bulkLoadPhase(final Table table, final Connection conn, ExecutorService pool,
    Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups,
    boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
  // atomically bulk load the groups.
  Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<>();
  for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
    final byte[] first = e.getKey().array();
    final Collection<LoadQueueItem> lqis = e.getValue();
    final ClientServiceCallable<byte[]> serviceCallable =
        buildClientServiceCallable(conn, table.getName(), first, lqis, copyFile);
    final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
      @Override
      public List<LoadQueueItem> call() throws Exception {
        List<LoadQueueItem> toRetry =
            tryAtomicRegionLoad(serviceCallable, table.getName(), first, lqis);
        return toRetry;
      }
    };
    if (item2RegionMap != null) {
      for (LoadQueueItem lqi : lqis) {
        item2RegionMap.put(lqi, e.getKey());
      }
    }
    loadingFutures.add(pool.submit(call));
  }
  // get all the results.
  for (Future<List<LoadQueueItem>> future : loadingFutures) {
    try {
      List<LoadQueueItem> toRetry = future.get();
      if (item2RegionMap != null) {
        for (LoadQueueItem lqi : toRetry) {
          item2RegionMap.remove(lqi);
        }
      }
      // LQIs that are requeued to be regrouped.
      queue.addAll(toRetry);
    } catch (ExecutionException e1) {
      Throwable t = e1.getCause();
      if (t instanceof IOException) {
        // TODO Implement bulk load recovery
        throw new IOException("BulkLoad encountered an unrecoverable problem", t);
      }
      LOG.error("Unexpected execution exception during bulk load", e1);
      throw new IllegalStateException(t);
    } catch (InterruptedException e1) {
      LOG.error("Unexpected interrupted exception during bulk load", e1);
      throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
    }
  }
}
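The catch block above shows the standard unwrap-and-rethrow pattern: pull the real failure out of ExecutionException.getCause() and rethrow it with a useful type. A minimal, self-contained helper sketching the same pattern (a generic getChecked is an illustration, not an HBase API):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public final class FutureUnwrap {
  static <T> T getChecked(Future<T> future) throws IOException {
    try {
      return future.get();
    } catch (ExecutionException e) {
      Throwable t = e.getCause();
      if (t instanceof IOException) {
        // rethrow with the checked type callers expect
        throw new IOException("task failed", t);
      }
      throw new IllegalStateException(t); // unexpected, non-IO failure
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt flag
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
  }
}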
Use of java.util.concurrent.ExecutionException in project hbase by apache.
The class HRegion, method initializeStores.
/**
 * Opens all Stores.
 * @param reporter progress reporter, may cancel the operation
 * @param status task to update with progress
 * @return the highest sequenceId found in any Store
 * @throws IOException if a store fails to open
 */
private long initializeStores(final CancelableProgressable reporter, MonitoredTask status)
    throws IOException {
  // Load in all the HStores.
  long maxSeqId = -1;
  // initialized to -1 so that we pick up MemstoreTS from column families
  long maxMemstoreTS = -1;
  if (!htableDescriptor.getFamilies().isEmpty()) {
    // initialize the thread pool for opening stores in parallel.
    ThreadPoolExecutor storeOpenerThreadPool = getStoreOpenAndCloseThreadPool(
        "StoreOpener-" + this.getRegionInfo().getShortNameToLog());
    CompletionService<HStore> completionService =
        new ExecutorCompletionService<>(storeOpenerThreadPool);
    // initialize each store in parallel
    for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
      status.setStatus("Instantiating store for column family " + family);
      completionService.submit(new Callable<HStore>() {
        @Override
        public HStore call() throws IOException {
          return instantiateHStore(family);
        }
      });
    }
    boolean allStoresOpened = false;
    boolean hasSloppyStores = false;
    try {
      for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
        Future<HStore> future = completionService.take();
        HStore store = future.get();
        this.stores.put(store.getFamily().getName(), store);
        if (store.isSloppyMemstore()) {
          hasSloppyStores = true;
        }
        long storeMaxSequenceId = store.getMaxSequenceId();
        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId);
        if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
          maxSeqId = storeMaxSequenceId;
        }
        long maxStoreMemstoreTS = store.getMaxMemstoreTS();
        if (maxStoreMemstoreTS > maxMemstoreTS) {
          maxMemstoreTS = maxStoreMemstoreTS;
        }
      }
      allStoresOpened = true;
      if (hasSloppyStores) {
        htableDescriptor.setFlushPolicyClassName(FlushNonSloppyStoresFirstPolicy.class.getName());
        LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + this);
      }
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    } catch (ExecutionException e) {
      throw new IOException(e.getCause());
    } finally {
      storeOpenerThreadPool.shutdownNow();
      if (!allStoresOpened) {
        // something went wrong, close all opened stores
        LOG.error("Could not initialize all stores for the region=" + this);
        for (Store store : this.stores.values()) {
          try {
            store.close();
          } catch (IOException e) {
            LOG.warn(e.getMessage());
          }
        }
      }
    }
  }
  return Math.max(maxSeqId, maxMemstoreTS + 1);
}
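initializeStores fans out one task per column family and then consumes results in completion order, so a slow store does not block collecting the others. A minimal, self-contained sketch of that take()/get() loop (the string tasks stand in for instantiateHStore):

import java.io.IOException;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParallelOpenSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CompletionService<String> cs = new ExecutorCompletionService<>(pool);
    String[] families = { "cf1", "cf2", "cf3" };
    for (String family : families) {
      cs.submit(() -> "opened " + family); // stand-in for instantiateHStore(family)
    }
    try {
      for (int i = 0; i < families.length; i++) {
        // take() blocks until the next task finishes, in completion order
        System.out.println(cs.take().get());
      }
    } catch (ExecutionException e) {
      // same unwrapping as initializeStores: surface the cause as an IOException
      throw new IOException(e.getCause());
    } finally {
      pool.shutdownNow();
    }
  }
}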