Usage example of com.pingcap.tikv.exception.GrpcException in project tispark (by pingcap): class KVErrorHandler, method handleResponseError.
// Referenced from TiDB
// store/tikv/region_request.go - onRegionError
/**
 * Inspects a KV response for region-level and key-level errors, performs the required
 * side effects (cache invalidation, leader update, back-off sleeps, lock resolution),
 * and tells the caller whether the request should be retried.
 *
 * @param backOffer back-off controller; {@code doBackOff} sleeps or throws when exhausted
 * @param resp the response to inspect; {@code null} means the request itself never completed
 * @return true: client should retry
 */
@Override
public boolean handleResponseError(BackOffer backOffer, RespT resp) {
  if (resp == null) {
    // No response at all: treat as a request-level failure and delegate.
    // (Fixed message typo: previously read "for region region".)
    String msg =
        String.format("Request Failed with unknown reason for region [%s]", recv.getRegion());
    logger.warn(msg);
    return handleRequestError(backOffer, new GrpcException(msg));
  }
  // Region error handling logic
  Errorpb.Error error = getRegionError(resp);
  if (error != null) {
    if (error.hasNotLeader()) {
      // this error is reported from raftstore:
      // peer of current request is not leader, the following might be its causes:
      // 1. cache is outdated, region has changed its leader, can be solved by re-fetching from PD
      // 2. leader of current region is missing, need to wait and then fetch region info from PD
      long newStoreId = error.getNotLeader().getLeader().getStoreId();
      boolean retry;
      // update Leader here
      logger.warn(
          String.format(
              "NotLeader Error with region id %d and store id %d, new store id %d",
              recv.getRegion().getId(),
              recv.getRegion().getLeader().getStoreId(),
              newStoreId));
      BackOffFunction.BackOffFuncType backOffFuncType;
      // since issuing store = NO_LEADER_STORE_ID requests to pd will definitely fail.
      if (newStoreId != NO_LEADER_STORE_ID) {
        // If update leader fails, we need to fetch new region info from pd,
        // and re-split key range for new region. Setting retry to false will
        // stop retry and enter handleCopResponse logic, which would use RegionMiss
        // backOff strategy to wait, fetch new region and re-split key range.
        // onNotLeader is only needed when updateLeader succeeds, thus switch
        // to a new store address.
        TiRegion newRegion = this.regionManager.updateLeader(recv.getRegion(), newStoreId);
        retry =
            newRegion != null
                && recv.onNotLeader(this.regionManager.getStoreById(newStoreId), newRegion);
        if (!retry) {
          notifyRegionStoreCacheInvalidate(
              recv.getRegion(), CacheInvalidateEvent.CacheType.LEADER);
        }
        backOffFuncType = BackOffFunction.BackOffFuncType.BoUpdateLeader;
      } else {
        logger.info(
            String.format(
                "Received zero store id, from region %d try next time",
                recv.getRegion().getId()));
        backOffFuncType = BackOffFunction.BackOffFuncType.BoRegionMiss;
        retry = false;
      }
      if (!retry) {
        // Leader unknown or update failed: drop the cached region so the next
        // attempt re-fetches fresh region info from PD.
        this.regionManager.invalidateRegion(recv.getRegion());
      }
      backOffer.doBackOff(backOffFuncType, new GrpcException(error.toString()));
      return retry;
    } else if (error.hasStoreNotMatch()) {
      // this error is reported from raftstore:
      // store_id requested at the moment is inconsistent with that expected
      // Solution: re-fetch from PD
      long storeId = recv.getRegion().getLeader().getStoreId();
      long actualStoreId = error.getStoreNotMatch().getActualStoreId();
      logger.warn(
          String.format(
              "Store Not Match happened with region id %d, store id %d, actual store id %d",
              recv.getRegion().getId(), storeId, actualStoreId));
      invalidateRegionStoreCache(recv.getRegion());
      recv.onStoreNotMatch(this.regionManager.getStoreById(storeId));
      // throwing it out.
      return false;
    } else if (error.hasEpochNotMatch()) {
      // this error is reported from raftstore:
      // region has outdated version, please try later.
      logger.warn(String.format("Stale Epoch encountered for region [%s]", recv.getRegion()));
      this.regionManager.onRegionStale(recv.getRegion());
      notifyRegionCacheInvalidate(recv.getRegion());
      return false;
    } else if (error.hasServerIsBusy()) {
      // this error is reported from kv:
      // will occur when write pressure is high. Please try later.
      logger.warn(
          String.format(
              "Server is busy for region [%s], reason: %s",
              recv.getRegion(), error.getServerIsBusy().getReason()));
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoServerBusy,
          new StatusRuntimeException(
              Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString())));
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      return true;
    } else if (error.hasRegionNotFound()) {
      // Region no longer exists on this store: back off, mark stale, let caller re-split.
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      this.regionManager.onRegionStale(recv.getRegion());
      notifyRegionCacheInvalidate(recv.getRegion());
      return false;
    } else if (error.hasStaleCommand()) {
      // this error is reported from raftstore:
      // command outdated, please try later
      logger.warn(String.format("Stale command for region [%s]", recv.getRegion()));
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      return true;
    } else if (error.hasRaftEntryTooLarge()) {
      // Entry exceeds raft's size limit: not retryable here, surface to the caller.
      logger.warn(String.format("Raft too large for region [%s]", recv.getRegion()));
      throw new StatusRuntimeException(
          Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString()));
    } else if (error.hasKeyNotInRegion()) {
      // this error is reported from raftstore:
      // key requested is not in current region
      // should not happen here.
      ByteString invalidKey = error.getKeyNotInRegion().getKey();
      logger.error(
          String.format(
              "Key not in region [%s] for key [%s], this error should not happen here.",
              recv.getRegion(), KeyUtils.formatBytesUTF8(invalidKey)));
      throw new StatusRuntimeException(Status.UNKNOWN.withDescription(error.toString()));
    }
    logger.warn(String.format("Unknown error %s for region [%s]", error, recv.getRegion()));
    // For other errors, we only drop cache here.
    // Upper level may split this task.
    invalidateRegionStoreCache(recv.getRegion());
    // retry if raft proposal is dropped, it indicates the store is in the middle of transition
    if (error.getMessage().contains("Raft ProposalDropped")
        || error.getMessage().contains("is missing")) {
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
      return true;
    }
  }
  // No region error: look for per-key lock errors and try to resolve them.
  boolean retry = false;
  if (resp instanceof ScanResponse) {
    // Scan responses may carry one lock error per key-value pair.
    List<KvPair> kvPairs = ((ScanResponse) resp).getPairsList();
    List<Lock> locks = new ArrayList<>();
    for (KvPair kvPair : kvPairs) {
      if (kvPair.hasError()) {
        Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(kvPair.getError());
        locks.add(lock);
      }
    }
    if (!locks.isEmpty()) {
      try {
        resolveLocks(backOffer, locks);
        retry = true;
      } catch (KeyException e) {
        logger.warn("Unable to handle KeyExceptions other than LockException", e);
      }
    }
  } else {
    // Key error handling logic
    Kvrpcpb.KeyError keyError = getKeyError.apply(resp);
    if (keyError != null) {
      try {
        Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(keyError);
        resolveLock(backOffer, lock);
        retry = true;
      } catch (KeyException e) {
        logger.warn("Unable to handle KeyExceptions other than LockException", e);
      }
    }
  }
  return retry;
}
Usage example of com.pingcap.tikv.exception.GrpcException in project tispark (by pingcap): class RegionStoreClient, method handleCopResponse.
// handleCopResponse checks coprocessor Response for region split and lock,
// returns more tasks when that happens, or handles the response if no error.
// if we're handling streaming coprocessor response, lastRange is the range of last
// successful response, otherwise it's nil.
private List<RangeSplitter.RegionTask> handleCopResponse(
    BackOffer backOffer,
    Coprocessor.Response response,
    List<Coprocessor.KeyRange> ranges,
    Queue<SelectResponse> responseQueue,
    long startTs) {
  final boolean forWrite = false;

  if (response == null) {
    // Send request failed, reasons may:
    // 1. TiKV down
    // 2. Network partition
    backOffer.doBackOff(
        BackOffFunction.BackOffFuncType.BoRegionMiss,
        new GrpcException("TiKV down or Network partition"));
    logger.warn("Re-splitting region task due to region error: TiKV down or Network partition");
    return resplitRanges(ranges);
  }

  if (response.hasRegionError()) {
    // Region moved/split underneath us: back off, then hand back re-split tasks.
    Errorpb.Error regionError = response.getRegionError();
    backOffer.doBackOff(
        BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(regionError.toString()));
    logger.warn("Re-splitting region task due to region error:" + regionError.getMessage());
    return resplitRanges(ranges);
  }

  if (response.hasLocked()) {
    // A transactional lock blocks the read: try to resolve it, wait out the
    // remaining TTL when necessary, then re-split so the caller retries.
    Lock blockingLock = new Lock(response.getLocked());
    logger.debug(String.format("coprocessor encounters locks: %s", blockingLock));
    ResolveLockResult resolution =
        lockResolverClient.resolveLocks(
            backOffer, startTs, Collections.singletonList(blockingLock), forWrite);
    addResolvedLocks(startTs, resolution.getResolvedLocks());
    long remainingTtlMs = resolution.getMsBeforeTxnExpired();
    if (remainingTtlMs > 0) {
      backOffer.doBackOffWithMaxSleep(
          BoTxnLockFast, remainingTtlMs, new LockException(blockingLock));
    }
    return resplitRanges(ranges);
  }

  String otherError = response.getOtherError();
  if (!otherError.isEmpty()) {
    // Anything else is not recoverable at this layer.
    logger.warn(String.format("Other error occurred, message: %s", otherError));
    throw new GrpcException(otherError);
  }

  // Success: decode the coprocessor payload and queue it for consumption.
  responseQueue.offer(doCoprocessor(response));
  return null;
}

// Re-splits the given key ranges by the current (refreshed) region layout.
private List<RangeSplitter.RegionTask> resplitRanges(List<Coprocessor.KeyRange> ranges) {
  return RangeSplitter.newSplitter(this.regionManager).splitRangeByRegion(ranges, storeType);
}
Usage example of com.pingcap.tikv.exception.GrpcException in project tispark (by pingcap): class PDErrorHandler, method handleResponseError.
/**
 * Inspects a PD response for an embedded PD error and reacts accordingly.
 *
 * @param backOffer back-off controller used to sleep before a retry
 * @param resp the PD response; {@code null} means nothing to handle
 * @return true when the caller should retry the request
 */
@Override
public boolean handleResponseError(BackOffer backOffer, RespT resp) {
  // Guard clauses: nothing to do without a response or without an error in it.
  if (resp == null) {
    return false;
  }
  PDError error = getError.apply(resp);
  if (error == null) {
    return false;
  }
  switch (error.getErrorType()) {
    case PD_ERROR:
      // Generic PD failure: back off, then refresh our notion of the PD leader.
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoPDRPC, new GrpcException(error.toString()));
      client.updateLeader();
      return true;
    case REGION_PEER_NOT_ELECTED:
      // Region has no elected leader yet: back off and retry.
      logger.debug(error.getMessage());
      backOffer.doBackOff(
          BackOffFunction.BackOffFuncType.BoPDRPC, new GrpcException(error.toString()));
      return true;
    default:
      throw new TiClientInternalException("Unknown error type encountered: " + error);
  }
}
Usage example of com.pingcap.tikv.exception.GrpcException in project tispark (by pingcap): class LockResolverTest, method versionTest.
/**
 * Reads keys "a".."z" at the current timestamp and verifies each value equals its key.
 *
 * @param hasLock when true, key "d" (i == 3) is expected to be locked, so a successful
 *     blocking read of it fails the test
 * @param blockingRead when true, a read may block on a lock until the back-off budget
 *     is exhausted, in which case a GrpcException("retry is exhausted.") is expected
 */
private void versionTest(boolean hasLock, boolean blockingRead) {
  for (int i = 0; i < 26; i++) {
    String expected = String.valueOf((char) ('a' + i));
    ByteString key = ByteString.copyFromUtf8(expected);
    TiRegion tiRegion = session.getRegionManager().getRegionByKey(key);
    RegionStoreClient client = builder.build(tiRegion);
    BackOffer backOffer = ConcreteBackOffer.newGetBackOff();
    if (!blockingRead) {
      ByteString v = client.get(backOffer, key, session.getTimestamp().getVersion());
      assertEquals(expected, v.toStringUtf8());
      continue;
    }
    try {
      ByteString v = client.get(backOffer, key, session.getTimestamp().getVersion());
      if (hasLock && i == 3) {
        // key "d" should be locked
        fail();
      } else {
        assertEquals(expected, v.toStringUtf8());
      }
    } catch (GrpcException e) {
      assertEquals(e.getMessage(), "retry is exhausted.");
    }
  }
}
Usage example of com.pingcap.tikv.exception.GrpcException in project tispark (by pingcap): class LockResolverTest, method checkTTLNotExpired.
/**
 * Asserts that reading {@code key} fails with back-off exhaustion because the key is
 * held by a lock whose TTL has not yet expired.
 *
 * <p>Fix: the {@code assertEquals} arguments were reversed — JUnit's convention is
 * {@code assertEquals(expected, actual)}, which matters for the failure message.
 *
 * @param key the key expected to be locked
 */
void checkTTLNotExpired(String key) {
  try {
    RegionStoreClient client = getRegionStoreClient(key);
    BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(CHECK_TTL_BACKOFF);
    // In SI mode, a lock <key, value2> is read. Try resolve it, but failed, cause TTL not
    // expires.
    client.get(backOffer, ByteString.copyFromUtf8(key), session.getTimestamp().getVersion());
    fail();
  } catch (GrpcException e) {
    assertEquals("retry is exhausted.", e.getMessage());
  }
}
Aggregations