Use of com.pingcap.tikv.exception.GrpcException in project tispark by pingcap.
From the class ConcreteBackOffer, method doBackOffWithMaxSleep.
@Override
public void doBackOffWithMaxSleep(BackOffFunction.BackOffFuncType funcType, long maxSleepMs, Exception err) {
  BackOffFunction backOffFunction = backOffFunctionMap.computeIfAbsent(funcType, this::createBackOffFunc);
  // The actual sleeping happens here; maxSleepMs caps this single call,
  // while the maxSleep field is the total budget for this back offer.
  totalSleep += backOffFunction.doBackOff(maxSleepMs);
  logger.debug(String.format("%s, retry later(totalSleep %dms, maxSleep %dms)", err.getMessage(), totalSleep, maxSleep));
  errors.add(err);
  if (maxSleep > 0 && totalSleep >= maxSleep) {
    StringBuilder errMsg = new StringBuilder(String.format("BackOffer.maxSleep %dms is exceeded, errors:", maxSleep));
    for (int i = 0; i < errors.size(); i++) {
      Exception curErr = errors.get(i);
      // Print only the last 3 errors for non-DEBUG log levels.
      if (logger.isDebugEnabled() || i >= errors.size() - 3) {
        errMsg.append("\n").append(i).append(".").append(curErr.toString());
      }
    }
    logger.warn(errMsg.toString());
    // Wrap the most recent error in the thrown exception
    throw new GrpcException("retry is exhausted.", err);
  }
}
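A typical caller loops until its RPC succeeds and lets the back offer decide whether to sleep or abort. The sketch below is illustrative, not from tispark: rpc() is a hypothetical transiently failing call, and once the 5000ms budget is spent, doBackOffWithMaxSleep throws GrpcException on the caller's behalf.

// Usage sketch (hypothetical rpc() helper; BoPDRPC chosen for illustration):
Store fetchStoreWithBackOff() {
  ConcreteBackOffer bo = ConcreteBackOffer.newCustomBackOff(5000);
  while (true) {
    try {
      return rpc(); // hypothetical call that may fail transiently
    } catch (Exception e) {
      // Sleeps per the BoPDRPC strategy; throws GrpcException once
      // totalSleep exceeds the 5000ms budget.
      bo.doBackOffWithMaxSleep(BackOffFunction.BackOffFuncType.BoPDRPC, 5000, e);
    }
  }
}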
Use of com.pingcap.tikv.exception.GrpcException in project tispark by pingcap.
From the class PDClientTest, method testRetryPolicy.
@Test
public void testRetryPolicy() throws Exception {
  long storeId = 1024;
  ExecutorService service = Executors.newCachedThreadPool();
  // Two failed (null) responses followed by a success: the 5000ms back off
  // budget is large enough to retry past the failures.
  pdServer.addGetStoreResp(null);
  pdServer.addGetStoreResp(null);
  pdServer.addGetStoreResp(GrpcUtils.makeGetStoreResponse(pdServer.getClusterId(), GrpcUtils.makeStore(storeId, "", Metapb.StoreState.Up)));
  try (PDClient client = session.getPDClient()) {
    Callable<Store> storeCallable = () -> client.getStore(ConcreteBackOffer.newCustomBackOff(5000), 0);
    Future<Store> storeFuture = service.submit(storeCallable);
    try {
      Store r = storeFuture.get(50, TimeUnit.SECONDS);
      assertEquals(r.getId(), storeId);
    } catch (TimeoutException e) {
      fail();
    }
    // Should fail: six consecutive null responses exhaust the default back
    // off before the queued success response is ever reached.
    pdServer.addGetStoreResp(null);
    pdServer.addGetStoreResp(null);
    pdServer.addGetStoreResp(null);
    pdServer.addGetStoreResp(null);
    pdServer.addGetStoreResp(null);
    pdServer.addGetStoreResp(null);
    pdServer.addGetStoreResp(GrpcUtils.makeGetStoreResponse(pdServer.getClusterId(), GrpcUtils.makeStore(storeId, "", Metapb.StoreState.Up)));
    try {
      client.getStore(defaultBackOff(), 0);
    } catch (GrpcException e) {
      assertTrue(true);
      return;
    }
    fail();
  }
}
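The defaultBackOff() helper is not shown in this excerpt; a plausible definition, consistent with a budget small enough that six forced retries exhaust it, would be:

// Hypothetical helper (not shown in the excerpt): a back offer whose
// 1000ms budget is used up by six consecutive failed responses.
private BackOffer defaultBackOff() {
  return ConcreteBackOffer.newCustomBackOff(1000);
}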
Use of com.pingcap.tikv.exception.GrpcException in project tispark by pingcap.
From the class BackOffFunction, method doBackOff.
/**
 * Back off exponentially, with optional jitter, according to the configured
 * back off strategy. See http://www.awsarchitectureblog.com/2015/03/backoff.html
 */
long doBackOff(long maxSleepMs) {
  long sleep = 0;
  long v = expo(base, cap, attempts);
  switch (strategy) {
    case NoJitter:
      sleep = v;
      break;
    case FullJitter:
      // Sleep anywhere in [0, v).
      sleep = ThreadLocalRandom.current().nextLong(v);
      break;
    case EqualJitter:
      // Sleep in [v/2, v): half deterministic, half random.
      sleep = v / 2 + ThreadLocalRandom.current().nextLong(v / 2);
      break;
    case DecorrJitter:
      // Decorrelated jitter: grow from the previous sleep, capped at cap.
      sleep = Math.min(cap, base + ThreadLocalRandom.current().nextLong(lastSleep * 3 - base));
      break;
  }
  if (maxSleepMs > 0 && sleep > maxSleepMs) {
    sleep = maxSleepMs;
  }
  try {
    Thread.sleep(sleep);
  } catch (InterruptedException e) {
    throw new GrpcException(e);
  }
  attempts++;
  lastSleep = sleep;
  return lastSleep;
}
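The expo helper is referenced but not shown above; a minimal sketch consistent with the capped exponential growth the Javadoc describes (the exact formula is an assumption) is below. For example, with base = 100ms and cap = 10s, attempts 0..4 yield v = 100, 200, 400, 800, 1600ms before jitter is applied.

// Plausible expo(): base * 2^n, clamped to cap. With EqualJitter the final
// sleep then lands in [v/2, v); with FullJitter, in [0, v).
private long expo(long base, long cap, long n) {
  return Math.min(cap, base * (long) Math.pow(2, n));
}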
Use of com.pingcap.tikv.exception.GrpcException in project tispark by pingcap.
From the class PDClient, method updateTiFlashReplicaStatus.
public void updateTiFlashReplicaStatus() {
  ByteSequence prefix = ByteSequence.from(TIFLASH_TABLE_SYNC_PROGRESS_PATH, StandardCharsets.UTF_8);
  // Retry the etcd read up to 5 times before giving up.
  for (int i = 0; i < 5; i++) {
    CompletableFuture<GetResponse> resp;
    try {
      resp = etcdClient.getKVClient().get(prefix, GetOption.newBuilder().withPrefix(prefix).build());
    } catch (Exception e) {
      logger.info("get tiflash table replica sync progress failed, continue checking.", e);
      continue;
    }
    GetResponse getResp;
    try {
      getResp = resp.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      continue;
    } catch (ExecutionException e) {
      throw new GrpcException("failed to update tiflash replica", e);
    }
    ConcurrentMap<Long, Double> progressMap = new ConcurrentHashMap<>();
    for (KeyValue kv : getResp.getKvs()) {
      long tableId;
      try {
        // Keys are expected to be TIFLASH_TABLE_SYNC_PROGRESS_PATH + tableId.
        tableId = Long.parseLong(kv.getKey().toString().substring(TIFLASH_TABLE_SYNC_PROGRESS_PATH.length()));
      } catch (Exception e) {
        logger.debug("invalid tiflash table replica sync progress key. key = " + kv.getKey().toString());
        continue;
      }
      double progress;
      try {
        progress = Double.parseDouble(kv.getValue().toString());
      } catch (Exception e) {
        logger.info("invalid tiflash table replica sync progress value. value = " + kv.getValue().toString());
        continue;
      }
      progressMap.put(tableId, progress);
    }
    tiflashReplicaMap = progressMap;
    break;
  }
}
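The parser above assumes etcd keys of the form TIFLASH_TABLE_SYNC_PROGRESS_PATH + tableId with a decimal sync progress as the value. A sketch of a consumer of the resulting map (the method name and readiness threshold are illustrative, not from PDClient):

// Illustrative consumer of tiflashReplicaMap: treat a table's TiFlash
// replica as usable once its sync progress has reached 1.0.
boolean isTiFlashReplicaReady(long tableId) {
  return tiflashReplicaMap.getOrDefault(tableId, 0.0) >= 1.0;
}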
Use of com.pingcap.tikv.exception.GrpcException in project tispark by pingcap.
From the class TTLManager, method sendTxnHeartBeat.
private void sendTxnHeartBeat(BackOffer bo, long ttl) {
  Pair<TiRegion, Metapb.Store> pair = regionManager.getRegionStorePairByKey(primaryLock);
  TiRegion tiRegion = pair.first;
  Metapb.Store store = pair.second;
  ClientRPCResult result = kvClient.txnHeartBeat(bo, primaryLock, startTS, ttl, tiRegion, store);
  if (!result.isSuccess() && !result.isRetry()) {
    throw new TiBatchWriteException("sendTxnHeartBeat error", result.getException());
  }
  if (result.isRetry()) {
    try {
      bo.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(String.format("sendTxnHeartBeat failed, regionId=%s", tiRegion.getId()), result.getException()));
      this.regionManager.invalidateStore(store.getId());
      this.regionManager.invalidateRegion(tiRegion);
      // Refresh the region/store mapping and retry the heartbeat.
      sendTxnHeartBeat(bo, ttl);
    } catch (GrpcException e) {
      String errorMsg = String.format("sendTxnHeartBeat error, regionId=%s, detail=%s", tiRegion.getId(), e.getMessage());
      throw new TiBatchWriteException(errorMsg, e);
    }
  }
  LOG.debug("sendTxnHeartBeat success, key={} ttl={}", LogDesensitization.hide(KeyUtils.formatBytes(primaryLock)), ttl);
}
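sendTxnHeartBeat is typically driven on a fixed cadence so the primary lock's TTL never expires while the transaction is in flight. A minimal sketch of such a driver follows; the scheduler wiring and the 20000ms/10000ms constants are assumptions, not taken from TTLManager.

// Hypothetical keep-alive loop: refresh a 20s TTL every 10s until the
// transaction finishes and the scheduler is shut down.
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
scheduler.scheduleAtFixedRate(
    () -> sendTxnHeartBeat(ConcreteBackOffer.newCustomBackOff(5000), 20000),
    0, 10000, TimeUnit.MILLISECONDS);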