Use of com.pingcap.tikv.exception.TiKVException in the tispark project by PingCAP:
class LockResolverClientV4, method resolveLockAsync.
/**
 * resolveLockAsync resolves lock assuming it was locked using the async commit protocol.
 *
 * <p>The transaction's final status is determined by checking all secondary locks; afterwards a
 * ResolveLock request is sent to every involved region in parallel, including the primary key's
 * region.
 *
 * @param bo back-off context used for retries
 * @param lock the lock being resolved (its txn ID identifies the async commit transaction)
 * @param status transaction status; its commit timestamp is updated in place
 * @throws TiKVException if the calling thread is interrupted or any per-region task fails
 */
private void resolveLockAsync(BackOffer bo, Lock lock, TxnStatus status) {
  // Determine the transaction's final commit/rollback decision from the secondary locks.
  AsyncResolveData resolveData = checkAllSecondaries(bo, lock, status);
  status.setCommitTS(resolveData.getCommitTs());
  // The primary key must be resolved along with the secondaries.
  resolveData.appendKey(lock.getPrimary());
  Map<TiRegion, List<ByteString>> groupResult =
      groupKeysByRegion(this.regionManager, resolveData.getKeys(), bo);
  // Parameterized logging, consistent with the rest of the file; avoids eager String.format.
  logger.info(
      "resolve async commit, startTS={}, commitTS={}", lock.getTxnID(), status.getCommitTS());
  ExecutorService executorService =
      Executors.newFixedThreadPool(
          conf.getKvClientConcurrency(), new ThreadFactoryBuilder().setDaemon(true).build());
  ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<>(executorService);
  for (Map.Entry<TiRegion, List<ByteString>> entry : groupResult.entrySet()) {
    TiRegion tiRegion = entry.getKey();
    List<ByteString> keys = entry.getValue();
    completionService.submit(() -> resolveRegionLocks(bo, lock, tiRegion, keys, status));
  }
  try {
    // Wait for every per-region task; a failure in any of them aborts the whole resolution.
    for (int i = 0; i < groupResult.size(); i++) {
      completionService.take().get();
    }
  } catch (InterruptedException e) {
    // These catch blocks report failures, so log at WARN rather than INFO.
    logger.warn("async commit recovery (sending ResolveLock) finished with errors", e);
    Thread.currentThread().interrupt(); // restore the interrupt flag for callers
    throw new TiKVException("Current thread interrupted.", e);
  } catch (ExecutionException e) {
    logger.warn("async commit recovery (sending ResolveLock) finished with errors", e);
    throw new TiKVException("Execution exception met.", e);
  } catch (Throwable e) {
    logger.warn("async commit recovery (sending ResolveLock) finished with errors", e);
    throw e;
  } finally {
    // Always tear the pool down so its (daemon) threads do not linger.
    executorService.shutdownNow();
  }
}
Use of com.pingcap.tikv.exception.TiKVException in the tispark project by PingCAP:
class LockResolverClientV4, method checkAllSecondaries.
/**
 * checkAllSecondaries checks the secondary locks of an async commit transaction to find out the
 * final status of the transaction.
 *
 * <p>The secondary keys are grouped by region and checked in parallel; results are accumulated
 * into a shared {@link AsyncResolveData} instance.
 *
 * @param bo back-off context used for retries
 * @param lock the primary lock of the async commit transaction
 * @param status transaction status carrying the primary lock's metadata (min commit TS,
 *     secondary key list)
 * @return the resolve data aggregated across all secondary keys
 * @throws TiKVException if the calling thread is interrupted or any per-region task fails
 */
private AsyncResolveData checkAllSecondaries(BackOffer bo, Lock lock, TxnStatus status) {
  // Shared accumulator, updated concurrently by the per-region check tasks.
  AsyncResolveData shared =
      new AsyncResolveData(status.getPrimaryLock().getMinCommitTs(), new ArrayList<>(), false);
  Map<TiRegion, List<ByteString>> groupResult =
      groupKeysByRegion(this.regionManager, status.getPrimaryLock().getSecondariesList(), bo);
  ExecutorService executorService =
      Executors.newFixedThreadPool(
          conf.getKvClientConcurrency(), new ThreadFactoryBuilder().setDaemon(true).build());
  ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<>(executorService);
  for (Map.Entry<TiRegion, List<ByteString>> entry : groupResult.entrySet()) {
    TiRegion tiRegion = entry.getKey();
    List<ByteString> keys = entry.getValue();
    completionService.submit(() -> checkSecondaries(bo, lock.getTxnID(), keys, tiRegion, shared));
  }
  try {
    // Wait for every per-region task before handing back the aggregated result.
    for (int i = 0; i < groupResult.size(); i++) {
      completionService.take().get();
    }
    return shared;
  } catch (InterruptedException e) {
    // These catch blocks report failures, so log at WARN rather than INFO.
    logger.warn("async commit recovery (sending CheckSecondaryLocks) finished with errors", e);
    Thread.currentThread().interrupt(); // restore the interrupt flag for callers
    throw new TiKVException("Current thread interrupted.", e);
  } catch (ExecutionException e) {
    logger.warn("async commit recovery (sending CheckSecondaryLocks) finished with errors", e);
    throw new TiKVException("Execution exception met.", e);
  } catch (Throwable e) {
    logger.warn("async commit recovery (sending CheckSecondaryLocks) finished with errors", e);
    throw e;
  } finally {
    // Always tear the pool down, on success and failure alike.
    executorService.shutdownNow();
  }
}
Use of com.pingcap.tikv.exception.TiKVException in the tispark project by PingCAP:
class ConcreteScanIterator, method loadCurrentRegionToCache.
/**
 * Loads the next batch of scan results into {@code currentCache} and returns the region the data
 * was read from, backing off and retrying whenever the scan hits a region error.
 */
@Override
TiRegion loadCurrentRegionToCache() throws GrpcException {
  BackOffer backOffer = ConcreteBackOffer.newScannerNextMaxBackOff();
  while (true) {
    try (RegionStoreClient client = builder.build(startKey)) {
      TiRegion region = client.getRegion();
      if (limit > 0) {
        try {
          // Never fetch more rows than the remaining limit allows.
          int batchSize = Math.min(limit, conf.getScanBatchSize());
          currentCache = client.scan(backOffer, startKey, batchSize, version);
        } catch (final TiKVException e) {
          // Region information may be stale: back off, then rebuild the client and retry.
          backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
          continue;
        }
      } else {
        // Nothing left to fetch.
        currentCache = null;
      }
      return region;
    }
  }
}
Use of com.pingcap.tikv.exception.TiKVException in the tispark project by PingCAP:
class KVClient, method doSendBatchGetInBatchesWithRetry.
/**
 * Sends a BatchGet for one batch, retrying through {@code doRetryBatchGet} when the batch's
 * region information has gone stale.
 *
 * @param backOffer back-off context used for retries
 * @param batch the batch of keys, grouped under a single region
 * @param version the snapshot timestamp to read at
 * @return a pair of (retry batches, fetched key-value pairs); on success the retry list is empty
 */
private Pair<List<Batch>, List<KvPair>> doSendBatchGetInBatchesWithRetry(
    BackOffer backOffer, Batch batch, long version) {
  TiRegion oldRegion = batch.getRegion();
  TiRegion currentRegion =
      clientBuilder.getRegionManager().getRegionByKey(batch.getRegion().getStartKey());
  if (!oldRegion.equals(currentRegion)) {
    // The region boundaries changed since the batch was formed: re-split and retry.
    return doRetryBatchGet(backOffer, batch);
  }
  // try-with-resources: the original leaked the client on both the success and the error path
  // (compare the try-with-resources usage in ConcreteScanIterator.loadCurrentRegionToCache).
  try (RegionStoreClient client = clientBuilder.build(batch.getRegion())) {
    List<KvPair> partialResult = client.batchGet(backOffer, batch.getKeys(), version);
    return Pair.create(new ArrayList<>(), partialResult);
  } catch (final TiKVException e) {
    // Region miss: back off, drop the stale cache entry, then re-split the key ranges.
    backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
    clientBuilder.getRegionManager().invalidateRegion(batch.getRegion());
    logger.debug("ReSplitting ranges for BatchGetRequest", e);
    return doRetryBatchGet(backOffer, batch);
  }
}
Use of com.pingcap.tikv.exception.TiKVException in the tispark project by PingCAP:
class TiSession, method splitRegion.
/**
 * Splits the regions covering {@code splitKeys} at those keys and returns the regions produced,
 * retrying recursively with back-off when a region error occurs. Keys that coincide with a
 * region boundary are skipped because they cannot split that region.
 */
private List<TiRegion> splitRegion(List<ByteString> splitKeys, BackOffer backOffer) {
  List<TiRegion> result = new ArrayList<>();
  Map<TiRegion, List<ByteString>> grouped = groupKeysByRegion(regionManager, splitKeys, backOffer);
  for (Map.Entry<TiRegion, List<ByteString>> group : grouped.entrySet()) {
    Pair<TiRegion, Metapb.Store> regionStore =
        getRegionManager().getRegionStorePairByKey(group.getKey().getStartKey());
    TiRegion region = regionStore.first;
    Metapb.Store store = regionStore.second;
    // A key equal to the region's start or end key cannot split the region; filter those out.
    List<ByteString> effectiveKeys =
        group.getValue().stream()
            .filter(k -> !k.equals(region.getStartKey()) && !k.equals(region.getEndKey()))
            .collect(Collectors.toList());
    if (effectiveKeys.isEmpty()) {
      logger.warn("split key equal to region start key or end key. Region splitting is not needed.");
      continue;
    }
    logger.info("start to split region id={}, split size={}", region.getId(), effectiveKeys.size());
    List<TiRegion> newRegions;
    try {
      newRegions = getRegionStoreClientBuilder().build(region, store).splitRegion(effectiveKeys);
    } catch (final TiKVException e) {
      // Stale region info: invalidate the cache entry, back off, then retry these keys.
      logger.warn("ReSplitting ranges for splitRegion", e);
      clientBuilder.getRegionManager().invalidateRegion(region);
      backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
      newRegions = splitRegion(effectiveKeys, backOffer);
    }
    logger.info("region id={}, new region size={}", region.getId(), newRegions.size());
    result.addAll(newRegions);
  }
  logger.info("splitRegion: return region size={}", result.size());
  return result;
}
Aggregations