Use of com.pingcap.tikv.txn.Lock in project tispark by pingcap:
the class KVErrorHandler, method handleResponseError.
// Referenced from TiDB
// store/tikv/region_request.go - onRegionError
/**
 * Inspects a KV response for region errors, key errors and resolvable locks,
 * invalidating cached region/store information and backing off as needed.
 *
 * @param backOffer backoff context used to sleep between retries
 * @param resp the raw KV response; may be null when the request itself failed
 * @return true: client should retry
 */
@Override
public boolean handleResponseError(BackOffer backOffer, RespT resp) {
if (resp == null) {
// Fix: message previously read "for region region [%s]" (duplicated word).
String msg = String.format("Request Failed with unknown reason for region [%s]", recv.getRegion());
logger.warn(msg);
return handleRequestError(backOffer, new GrpcException(msg));
}
// Region error handling logic
Errorpb.Error error = getRegionError(resp);
if (error != null) {
if (error.hasNotLeader()) {
// this error is reported from raftstore:
// peer of current request is not leader, the following might be its causes:
// 1. cache is outdated, region has changed its leader, can be solved by re-fetching from PD
// 2. leader of current region is missing, need to wait and then fetch region info from PD
long newStoreId = error.getNotLeader().getLeader().getStoreId();
boolean retry;
// update Leader here
logger.warn(String.format("NotLeader Error with region id %d and store id %d, new store id %d", recv.getRegion().getId(), recv.getRegion().getLeader().getStoreId(), newStoreId));
BackOffFunction.BackOffFuncType backOffFuncType;
// since issuing store = NO_LEADER_STORE_ID requests to pd will definitely fail.
if (newStoreId != NO_LEADER_STORE_ID) {
// If update leader fails, we need to fetch new region info from pd,
// and re-split key range for new region. Setting retry to false will
// stop retry and enter handleCopResponse logic, which would use RegionMiss
// backOff strategy to wait, fetch new region and re-split key range.
// onNotLeader is only needed when updateLeader succeeds, thus switch
// to a new store address.
TiRegion newRegion = this.regionManager.updateLeader(recv.getRegion(), newStoreId);
retry = newRegion != null && recv.onNotLeader(this.regionManager.getStoreById(newStoreId), newRegion);
if (!retry) {
notifyRegionStoreCacheInvalidate(recv.getRegion(), CacheInvalidateEvent.CacheType.LEADER);
}
backOffFuncType = BackOffFunction.BackOffFuncType.BoUpdateLeader;
} else {
logger.info(String.format("Received zero store id, from region %d try next time", recv.getRegion().getId()));
backOffFuncType = BackOffFunction.BackOffFuncType.BoRegionMiss;
retry = false;
}
if (!retry) {
this.regionManager.invalidateRegion(recv.getRegion());
}
backOffer.doBackOff(backOffFuncType, new GrpcException(error.toString()));
return retry;
} else if (error.hasStoreNotMatch()) {
// this error is reported from raftstore:
// store_id requested at the moment is inconsistent with that expected
// Solution: re-fetch from PD
long storeId = recv.getRegion().getLeader().getStoreId();
long actualStoreId = error.getStoreNotMatch().getActualStoreId();
logger.warn(String.format("Store Not Match happened with region id %d, store id %d, actual store id %d", recv.getRegion().getId(), storeId, actualStoreId));
invalidateRegionStoreCache(recv.getRegion());
recv.onStoreNotMatch(this.regionManager.getStoreById(storeId));
// throwing it out.
return false;
} else if (error.hasEpochNotMatch()) {
// this error is reported from raftstore:
// region has outdated version, please try later.
logger.warn(String.format("Stale Epoch encountered for region [%s]", recv.getRegion()));
this.regionManager.onRegionStale(recv.getRegion());
notifyRegionCacheInvalidate(recv.getRegion());
return false;
} else if (error.hasServerIsBusy()) {
// this error is reported from kv:
// will occur when write pressure is high. Please try later.
logger.warn(String.format("Server is busy for region [%s], reason: %s", recv.getRegion(), error.getServerIsBusy().getReason()));
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoServerBusy, new StatusRuntimeException(Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString())));
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
return true;
} else if (error.hasRegionNotFound()) {
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
this.regionManager.onRegionStale(recv.getRegion());
notifyRegionCacheInvalidate(recv.getRegion());
return false;
} else if (error.hasStaleCommand()) {
// this error is reported from raftstore:
// command outdated, please try later
logger.warn(String.format("Stale command for region [%s]", recv.getRegion()));
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
return true;
} else if (error.hasRaftEntryTooLarge()) {
logger.warn(String.format("Raft too large for region [%s]", recv.getRegion()));
throw new StatusRuntimeException(Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString()));
} else if (error.hasKeyNotInRegion()) {
// this error is reported from raftstore:
// key requested is not in current region
// should not happen here.
ByteString invalidKey = error.getKeyNotInRegion().getKey();
logger.error(String.format("Key not in region [%s] for key [%s], this error should not happen here.", recv.getRegion(), KeyUtils.formatBytesUTF8(invalidKey)));
throw new StatusRuntimeException(Status.UNKNOWN.withDescription(error.toString()));
}
logger.warn(String.format("Unknown error %s for region [%s]", error, recv.getRegion()));
// For other errors, we only drop cache here.
// Upper level may split this task.
invalidateRegionStoreCache(recv.getRegion());
// retry if raft proposal is dropped, it indicates the store is in the middle of transition
if (error.getMessage().contains("Raft ProposalDropped") || error.getMessage().contains("is missing")) {
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
return true;
}
}
boolean retry = false;
if (resp instanceof ScanResponse) {
// Scan responses may embed per-pair key errors; collect any locks and try
// to resolve them all, retrying only if resolution succeeds.
List<KvPair> kvPairs = ((ScanResponse) resp).getPairsList();
List<Lock> locks = new ArrayList<>();
for (KvPair kvPair : kvPairs) {
if (kvPair.hasError()) {
Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(kvPair.getError());
locks.add(lock);
}
}
if (!locks.isEmpty()) {
try {
resolveLocks(backOffer, locks);
retry = true;
} catch (KeyException e) {
logger.warn("Unable to handle KeyExceptions other than LockException", e);
}
}
} else {
// Key error handling logic
Kvrpcpb.KeyError keyError = getKeyError.apply(resp);
if (keyError != null) {
try {
Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(keyError);
resolveLock(backOffer, lock);
retry = true;
} catch (KeyException e) {
logger.warn("Unable to handle KeyExceptions other than LockException", e);
}
}
}
return retry;
}
Use of com.pingcap.tikv.txn.Lock in project tispark by pingcap:
the class RegionStoreClient, method handleCopResponse.
// handleCopResponse checks coprocessor Response for region split and lock,
// returns more tasks when that happens, or handles the response if no error.
// if we're handling streaming coprocessor response, lastRange is the range of last
// successful response, otherwise it's nil.
private List<RangeSplitter.RegionTask> handleCopResponse(BackOffer backOffer, Coprocessor.Response response, List<Coprocessor.KeyRange> ranges, Queue<SelectResponse> responseQueue, long startTs) {
final boolean forWrite = false;
if (response == null) {
// No response at all; likely causes:
// 1. TiKV down
// 2. Network partition
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException("TiKV down or Network partition"));
logger.warn("Re-splitting region task due to region error: TiKV down or Network partition");
// Split ranges
return RangeSplitter.newSplitter(this.regionManager).splitRangeByRegion(ranges, storeType);
}
if (response.hasRegionError()) {
// Region-level failure: back off, then hand back re-split tasks to the caller.
Errorpb.Error err = response.getRegionError();
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(err.toString()));
logger.warn("Re-splitting region task due to region error:" + err.getMessage());
// Split ranges
return RangeSplitter.newSplitter(this.regionManager).splitRangeByRegion(ranges, storeType);
}
if (response.hasLocked()) {
// The coprocessor hit a lock; try to resolve it before re-splitting.
Lock encounteredLock = new Lock(response.getLocked());
logger.debug(String.format("coprocessor encounters locks: %s", encounteredLock));
ResolveLockResult result = lockResolverClient.resolveLocks(backOffer, startTs, Collections.singletonList(encounteredLock), forWrite);
addResolvedLocks(startTs, result.getResolvedLocks());
long expiredMs = result.getMsBeforeTxnExpired();
if (expiredMs > 0) {
backOffer.doBackOffWithMaxSleep(BoTxnLockFast, expiredMs, new LockException(encounteredLock));
}
// Split ranges
return RangeSplitter.newSplitter(this.regionManager).splitRangeByRegion(ranges, storeType);
}
String otherError = response.getOtherError();
if (otherError.isEmpty()) {
// Success path: queue the decoded response; null signals "no re-split needed".
responseQueue.offer(doCoprocessor(response));
return null;
}
logger.warn(String.format("Other error occurred, message: %s", otherError));
throw new GrpcException(otherError);
}
Use of com.pingcap.tikv.txn.Lock in project tispark by pingcap:
the class RegionStoreClient, method isPrewriteSuccess.
/**
 * @param backOffer backOffer
 * @param resp response
 * @return Return true means the rpc call success. Return false means the rpc call fail,
 * RegionStoreClient should retry. Throw an Exception means the rpc call fail,
 * RegionStoreClient cannot handle this kind of error
 * @throws TiClientInternalException
 * @throws RegionException
 * @throws KeyException
 */
private boolean isPrewriteSuccess(BackOffer backOffer, PrewriteResponse resp, long startTs) throws TiClientInternalException, KeyException, RegionException {
final boolean forWrite = true;
if (resp == null) {
// No response means the request itself failed; drop the cached region.
this.regionManager.onRequestFail(region);
throw new TiClientInternalException("Prewrite Response failed without a cause");
}
if (resp.hasRegionError()) {
throw new RegionException(resp.getRegionError());
}
// Collect lock errors; any non-lock key error is unrecoverable here.
List<Lock> encounteredLocks = new ArrayList<>();
for (KeyError err : resp.getErrorsList()) {
if (!err.hasLocked()) {
throw new KeyException(err.toString());
}
encounteredLocks.add(new Lock(err.getLocked()));
}
if (encounteredLocks.isEmpty()) {
// No errors at all: the prewrite succeeded.
return true;
}
// Try to resolve the locks, then back off and tell the caller to retry.
ResolveLockResult result = lockResolverClient.resolveLocks(backOffer, startTs, encounteredLocks, forWrite);
addResolvedLocks(startTs, result.getResolvedLocks());
long expiredMs = result.getMsBeforeTxnExpired();
if (expiredMs > 0) {
backOffer.doBackOffWithMaxSleep(BoTxnLock, expiredMs, new KeyException(resp.getErrorsList().get(0)));
}
return false;
}
Use of com.pingcap.tikv.txn.Lock in project tispark by pingcap:
the class RegionStoreClient, method handleBatchGetResponse.
private List<KvPair> handleBatchGetResponse(BackOffer backOffer, BatchGetResponse resp, long version) {
final boolean forWrite = false;
if (resp == null) {
// The request never completed; invalidate the cached region and bail out.
this.regionManager.onRequestFail(region);
throw new TiClientInternalException("BatchGetResponse failed without a cause");
}
if (resp.hasRegionError()) {
throw new RegionException(resp.getRegionError());
}
// Gather lock errors from the returned pairs; any other key error is fatal.
List<Lock> pendingLocks = new ArrayList<>();
for (KvPair pair : resp.getPairsList()) {
if (!pair.hasError()) {
continue;
}
if (!pair.getError().hasLocked()) {
throw new KeyException(pair.getError());
}
pendingLocks.add(new Lock(pair.getError().getLocked()));
}
if (pendingLocks.isEmpty()) {
return resp.getPairsList();
}
ResolveLockResult result = lockResolverClient.resolveLocks(backOffer, version, pendingLocks, forWrite);
addResolvedLocks(version, result.getResolvedLocks());
// resolveLocks already retried, just throw error to upper logic.
throw new TiKVException("locks not resolved, retry");
}
Use of com.pingcap.tikv.txn.Lock in project tispark by pingcap:
the class RegionStoreClient, method doScan.
// TODO: resolve locks after scan
private List<KvPair> doScan(ScanResponse resp) {
// An errored pair should carry a Lock; rebuild the pair using the key
// extracted from that lock so downstream consumers see a usable key.
List<KvPair> rewritten = new ArrayList<>(resp.getPairsList().size());
for (KvPair pair : resp.getPairsList()) {
if (!pair.hasError()) {
rewritten.add(pair);
continue;
}
Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(pair.getError());
rewritten.add(KvPair.newBuilder().setError(pair.getError()).setValue(pair.getValue()).setKey(lock.getKey()).build());
}
return Collections.unmodifiableList(rewritten);
}
Aggregations