use of com.pingcap.tikv.region.TiRegion in project tispark by pingcap.
the class KVErrorHandler method handleResponseError.
// Referenced from TiDB
// store/tikv/region_request.go - onRegionError
/**
* @return true: client should retry
*/
@Override
public boolean handleResponseError(BackOffer backOffer, RespT resp) {
  if (resp == null) {
    String msg =
        String.format("Request Failed with unknown reason for region [%s]", recv.getRegion());
    logger.warn(msg);
    return handleRequestError(backOffer, new GrpcException(msg));
  }
  // Region error handling logic
  Errorpb.Error error = getRegionError(resp);
  if (error != null) {
    if (error.hasNotLeader()) {
      // this error is reported from raftstore:
      // peer of current request is not leader, the following might be its causes:
      // 1. cache is outdated, region has changed its leader, can be solved by re-fetching from PD
      // 2. leader of current region is missing, need to wait and then fetch region info from PD
      long newStoreId = error.getNotLeader().getLeader().getStoreId();
      boolean retry;
      // update Leader here
      logger.warn(
          String.format(
              "NotLeader Error with region id %d and store id %d, new store id %d",
              recv.getRegion().getId(),
              recv.getRegion().getLeader().getStoreId(),
              newStoreId));
      BackOffFunction.BackOffFuncType backOffFuncType;
      // since issuing requests with store = NO_LEADER_STORE_ID to pd will definitely fail.
      if (newStoreId != NO_LEADER_STORE_ID) {
        // If update leader fails, we need to fetch new region info from pd,
        // and re-split key range for new region. Setting retry to false will
        // stop retry and enter handleCopResponse logic, which would use RegionMiss
        // backOff strategy to wait, fetch new region and re-split key range.
        // onNotLeader is only needed when updateLeader succeeds, thus switch
        // to a new store address.
        TiRegion newRegion = this.regionManager.updateLeader(recv.getRegion(), newStoreId);
        retry =
            newRegion != null
                && recv.onNotLeader(this.regionManager.getStoreById(newStoreId), newRegion);
        if (!retry) {
          notifyRegionStoreCacheInvalidate(
              recv.getRegion(), CacheInvalidateEvent.CacheType.LEADER);
        }
        backOffFuncType = BackOffFunction.BackOffFuncType.BoUpdateLeader;
      } else {
        logger.info(
            String.format(
                "Received zero store id, from region %d try next time",
                recv.getRegion().getId()));
        backOffFuncType = BackOffFunction.BackOffFuncType.BoRegionMiss;
        retry = false;
      }
      if (!retry) {
        this.regionManager.invalidateRegion(recv.getRegion());
      }
      backOffer.doBackOff(backOffFuncType, new GrpcException(error.toString()));
      return retry;
    } else if (error.hasStoreNotMatch()) {
      // this error is reported from raftstore:
      // store_id requested at the moment is inconsistent with that expected
      // Solution: re-fetch from PD
      long storeId = recv.getRegion().getLeader().getStoreId();
      long actualStoreId = error.getStoreNotMatch().getActualStoreId();
      logger.warn(
          String.format(
              "Store Not Match happened with region id %d, store id %d, actual store id %d",
              recv.getRegion().getId(), storeId, actualStoreId));
      invalidateRegionStoreCache(recv.getRegion());
      recv.onStoreNotMatch(this.regionManager.getStoreById(storeId));
      // throwing it out.
      return false;
    } else if (error.hasEpochNotMatch()) {
      // this error is reported from raftstore:
      // region has an outdated version, please try later.
      logger.warn(String.format("Stale Epoch encountered for region [%s]", recv.getRegion()));
      this.regionManager.onRegionStale(recv.getRegion());
      notifyRegionCacheInvalidate(recv.getRegion());
      return false;
    } else if (error.hasServerIsBusy()) {
      // this error is reported from kv:
      // will occur when write pressure is high. Please try later.
      logger.warn(
          String.format(
              "Server is busy for region [%s], reason: %s",
              recv.getRegion(), error.getServerIsBusy().getReason()));
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoServerBusy,
          new StatusRuntimeException(
              Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString())));
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      return true;
    } else if (error.hasRegionNotFound()) {
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      this.regionManager.onRegionStale(recv.getRegion());
      notifyRegionCacheInvalidate(recv.getRegion());
      return false;
    } else if (error.hasStaleCommand()) {
      // this error is reported from raftstore:
      // command is outdated, please try later
      logger.warn(String.format("Stale command for region [%s]", recv.getRegion()));
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      return true;
    } else if (error.hasRaftEntryTooLarge()) {
      logger.warn(String.format("Raft too large for region [%s]", recv.getRegion()));
      throw new StatusRuntimeException(
          Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString()));
    } else if (error.hasKeyNotInRegion()) {
      // this error is reported from raftstore:
      // the requested key is not in the current region
      // should not happen here.
      ByteString invalidKey = error.getKeyNotInRegion().getKey();
      logger.error(
          String.format(
              "Key not in region [%s] for key [%s], this error should not happen here.",
              recv.getRegion(), KeyUtils.formatBytesUTF8(invalidKey)));
      throw new StatusRuntimeException(Status.UNKNOWN.withDescription(error.toString()));
    }
    logger.warn(String.format("Unknown error %s for region [%s]", error, recv.getRegion()));
    // For other errors, we only drop cache here.
    // Upper level may split this task.
    invalidateRegionStoreCache(recv.getRegion());
    // retry if the raft proposal is dropped, which indicates the store is in the middle of a transition
    if (error.getMessage().contains("Raft ProposalDropped")
        || error.getMessage().contains("is missing")) {
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      return true;
    }
  }
  boolean retry = false;
  if (resp instanceof ScanResponse) {
    List<KvPair> kvPairs = ((ScanResponse) resp).getPairsList();
    List<Lock> locks = new ArrayList<>();
    for (KvPair kvPair : kvPairs) {
      if (kvPair.hasError()) {
        Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(kvPair.getError());
        locks.add(lock);
      }
    }
    if (!locks.isEmpty()) {
      try {
        resolveLocks(backOffer, locks);
        retry = true;
      } catch (KeyException e) {
        logger.warn("Unable to handle KeyExceptions other than LockException", e);
      }
    }
  } else {
    // Key error handling logic
    Kvrpcpb.KeyError keyError = getKeyError.apply(resp);
    if (keyError != null) {
      try {
        Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(keyError);
        resolveLock(backOffer, lock);
        retry = true;
      } catch (KeyException e) {
        logger.warn("Unable to handle KeyExceptions other than LockException", e);
      }
    }
  }
  return retry;
}
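
The boolean result is the retry signal for whatever RPC loop wraps this handler. A minimal caller-side sketch is shown below; callWithRetry, rpcCall, and the Supplier parameter are illustrative placeholders, not the actual tispark call sites, and only the handleResponseError(BackOffer, RespT) contract shown above is assumed.

// Hypothetical caller-side sketch; callWithRetry and rpcCall are illustrative names.
private <RespT> RespT callWithRetry(
    BackOffer backOffer,
    KVErrorHandler<RespT> handler,
    java.util.function.Supplier<RespT> rpcCall) {
  while (true) {
    RespT resp = rpcCall.get(); // issue the gRPC request
    if (!handler.handleResponseError(backOffer, resp)) {
      // false: no retryable error remains, or caches were invalidated and the
      // caller must re-fetch region info and re-split its key ranges.
      return resp;
    }
    // true: the handler has already slept via the BackOffer, so retry immediately.
  }
}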
use of com.pingcap.tikv.region.TiRegion in project tispark by pingcap.
the class LockResolverClientV4 method resolveLockAsync.
/**
* resolveLockAsync resolves lock assuming it was locked using the async commit protocol.
*/
private void resolveLockAsync(BackOffer bo, Lock lock, TxnStatus status) {
  AsyncResolveData resolveData = checkAllSecondaries(bo, lock, status);
  status.setCommitTS(resolveData.getCommitTs());
  resolveData.appendKey(lock.getPrimary());
  Map<TiRegion, List<ByteString>> groupResult =
      groupKeysByRegion(this.regionManager, resolveData.getKeys(), bo);
  logger.info(
      String.format(
          "resolve async commit, startTS=%d, commitTS=%d", lock.getTxnID(), status.getCommitTS()));
  ExecutorService executorService =
      Executors.newFixedThreadPool(
          conf.getKvClientConcurrency(), new ThreadFactoryBuilder().setDaemon(true).build());
  ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<>(executorService);
  for (Map.Entry<TiRegion, List<ByteString>> entry : groupResult.entrySet()) {
    TiRegion tiRegion = entry.getKey();
    List<ByteString> keys = entry.getValue();
    completionService.submit(() -> resolveRegionLocks(bo, lock, tiRegion, keys, status));
  }
  try {
    for (int i = 0; i < groupResult.size(); i++) {
      completionService.take().get();
    }
  } catch (InterruptedException e) {
    logger.info("async commit recovery (sending ResolveLock) finished with errors", e);
    Thread.currentThread().interrupt();
    throw new TiKVException("Current thread interrupted.", e);
  } catch (ExecutionException e) {
    logger.info("async commit recovery (sending ResolveLock) finished with errors", e);
    throw new TiKVException("Execution exception met.", e);
  } catch (Throwable e) {
    logger.info("async commit recovery (sending ResolveLock) finished with errors", e);
    throw e;
  } finally {
    executorService.shutdownNow();
  }
}
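
Both resolveLockAsync and checkAllSecondaries below use the same fan-out/collect idiom: submit one task per region group to an ExecutorCompletionService, then take() exactly as many results as were submitted so that the first failed task surfaces as an ExecutionException. A stripped-down sketch of that idiom with plain JDK types (the key groups, the task body, and the fixed pool size are placeholders):

// Illustrative sketch only; groups and the submitted lambda stand in for
// the per-region calls such as resolveRegionLocks(...) or checkSecondaries(...).
ExecutorService pool = Executors.newFixedThreadPool(4);
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(pool);
List<List<String>> groups = Arrays.asList(Arrays.asList("k1", "k2"), Arrays.asList("k3"));
try {
  for (List<String> group : groups) {
    completionService.submit(() -> group.isEmpty() ? Boolean.FALSE : Boolean.TRUE);
  }
  for (int i = 0; i < groups.size(); i++) {
    // blocks until one task finishes; a task failure is rethrown as ExecutionException
    completionService.take().get();
  }
} catch (InterruptedException e) {
  Thread.currentThread().interrupt();
  throw new TiKVException("Current thread interrupted.", e);
} catch (ExecutionException e) {
  throw new TiKVException("Execution exception met.", e);
} finally {
  pool.shutdownNow();
}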
use of com.pingcap.tikv.region.TiRegion in project tispark by pingcap.
the class LockResolverClientV4 method checkAllSecondaries.
/**
* checkAllSecondaries checks the secondary locks of an async commit transaction to find out the
* final status of the transaction
*/
private AsyncResolveData checkAllSecondaries(BackOffer bo, Lock lock, TxnStatus status) {
  AsyncResolveData shared =
      new AsyncResolveData(status.getPrimaryLock().getMinCommitTs(), new ArrayList<>(), false);
  Map<TiRegion, List<ByteString>> groupResult =
      groupKeysByRegion(this.regionManager, status.getPrimaryLock().getSecondariesList(), bo);
  ExecutorService executorService =
      Executors.newFixedThreadPool(
          conf.getKvClientConcurrency(), new ThreadFactoryBuilder().setDaemon(true).build());
  ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<>(executorService);
  for (Map.Entry<TiRegion, List<ByteString>> entry : groupResult.entrySet()) {
    TiRegion tiRegion = entry.getKey();
    List<ByteString> keys = entry.getValue();
    completionService.submit(() -> checkSecondaries(bo, lock.getTxnID(), keys, tiRegion, shared));
  }
  try {
    for (int i = 0; i < groupResult.size(); i++) {
      completionService.take().get();
    }
    return shared;
  } catch (InterruptedException e) {
    logger.info("async commit recovery (sending CheckSecondaryLocks) finished with errors", e);
    Thread.currentThread().interrupt();
    throw new TiKVException("Current thread interrupted.", e);
  } catch (ExecutionException e) {
    logger.info("async commit recovery (sending CheckSecondaryLocks) finished with errors", e);
    throw new TiKVException("Execution exception met.", e);
  } catch (Throwable e) {
    logger.info("async commit recovery (sending CheckSecondaryLocks) finished with errors", e);
    throw e;
  } finally {
    executorService.shutdownNow();
  }
}
use of com.pingcap.tikv.region.TiRegion in project tispark by pingcap.
the class ConcreteScanIterator method loadCurrentRegionToCache.
@Override
TiRegion loadCurrentRegionToCache() throws GrpcException {
  BackOffer backOffer = ConcreteBackOffer.newScannerNextMaxBackOff();
  while (true) {
    try (RegionStoreClient client = builder.build(startKey)) {
      TiRegion region = client.getRegion();
      if (limit <= 0) {
        currentCache = null;
      } else {
        try {
          int scanSize = Math.min(limit, conf.getScanBatchSize());
          currentCache = client.scan(backOffer, startKey, scanSize, version);
        } catch (final TiKVException e) {
          backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
          continue;
        }
      }
      return region;
    }
  }
}
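
The loop above is the standard back-off/retry idiom for region misses: when the scan fails with a TiKVException, doBackOff sleeps and eventually gives up by throwing once the budget from newScannerNextMaxBackOff() is exhausted; otherwise the loop rebuilds the client and retries. Condensed to its skeleton (fetchBatch is a hypothetical placeholder for the client.scan(backOffer, startKey, scanSize, version) call):

// Skeleton of the back-off/retry idiom; fetchBatch is not a real tispark method.
BackOffer backOffer = ConcreteBackOffer.newScannerNextMaxBackOff();
while (true) {
  try {
    return fetchBatch(backOffer);
  } catch (final TiKVException e) {
    // sleeps between attempts; throws once the scanner's back-off budget is exhausted
    backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
  }
}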
use of com.pingcap.tikv.region.TiRegion in project tispark by pingcap.
the class ConcreteScanIterator method resolveCurrentLock.
private ByteString resolveCurrentLock(Kvrpcpb.KvPair current) {
logger.warn(String.format("resolve current key error %s", current.getError().toString()));
Pair<TiRegion, Metapb.Store> pair = builder.getRegionManager().getRegionStorePairByKey(current.getKey());
TiRegion region = pair.first;
Metapb.Store store = pair.second;
BackOffer backOffer = ConcreteBackOffer.newGetBackOff();
try (RegionStoreClient client = builder.build(region, store)) {
return client.get(backOffer, current.getKey(), version);
} catch (Exception e) {
throw new KeyException(current.getError());
}
}