use of io.dingodb.store.row.metadata.Region in project dingo by dingodb.
the class StoreEngine method doSplit.
public void doSplit(final String regionId, final String newRegionId, final byte[] splitKey) {
    try {
        Requires.requireNonNull(regionId, "regionId");
        Requires.requireNonNull(newRegionId, "newRegionId");
        final RegionEngine parent = getRegionEngine(regionId);
        final Region region = parent.getRegion().copy();
        final RegionEngineOptions rOpts = parent.copyRegionOpts();
        region.setId(newRegionId);
        region.setStartKey(splitKey);
        region.setRegionEpoch(new RegionEpoch(-1, -1));
        rOpts.setRegionId(newRegionId);
        rOpts.setStartKeyBytes(region.getStartKey());
        rOpts.setEndKeyBytes(region.getEndKey());
        rOpts.setRaftGroupId(JRaftHelper.getJRaftGroupId(this.pdClient.getClusterName(), newRegionId));
        rOpts.setRaftDataPath(null);
        String baseRaftDataPath = "";
        if (this.storeOpts.getStoreDBOptions() != null) {
            baseRaftDataPath = this.storeOpts.getRaftStoreOptions().getDataPath();
        }
        String raftDataPath = JRaftHelper.getRaftDataPath(baseRaftDataPath, region.getId(), getSelfEndpoint().getPort());
        rOpts.setRaftDataPath(raftDataPath);
        rOpts.setRaftStoreOptions(this.storeOpts.getRaftStoreOptions());
        final RegionEngine engine = new RegionEngine(region, this);
        if (!engine.init(rOpts)) {
            LOG.error("Fail to init [RegionEngine: {}].", region);
            throw Errors.REGION_ENGINE_FAIL.exception();
        }
        // update the parent region's configuration
        final Region pRegion = parent.getRegion();
        final RegionEpoch pEpoch = pRegion.getRegionEpoch();
        final long version = pEpoch.getVersion();
        // bump the epoch version
        pEpoch.setVersion(version + 1);
        // shrink the parent: its end key becomes the split key
        pRegion.setEndKey(splitKey);
        // The following two lines establish a 'happens-before' relation for readers of
        // 'pRegion', because a write to a ConcurrentMap happens-before every subsequent
        // read of that ConcurrentMap.
        this.regionEngineTable.put(region.getId(), engine);
        registerRegionKVService(new DefaultRegionKVService(engine));
        // update the local regionRouteTable
        this.pdClient.getRegionRouteTable().splitRegion(pRegion.getId(), region);
        /**
         * When a Region is split, the cluster info should be updated:
         * 1. replace the old region with the split region;
         * 2. insert the new region produced by the split;
         * 3. call the pdClient to notify the placement driver.
         */
        // todo Huzx
        /*
        {
            Store localStore = this.pdClient.getCurrentStore();
            List<Region> regionList = new ArrayList<>();
            for (Map.Entry<Long, RegionEngine> entry : this.regionEngineTable.entrySet()) {
                regionList.add(entry.getValue().getRegion().copy());
            }
            localStore.setRegions(regionList);
            this.pdClient.refreshStore(localStore);
        }
        */
    } finally {
        this.splitting.set(false);
    }
}
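
For orientation, the split above boils down to key-range and epoch bookkeeping: the new region takes over [splitKey, endKey) while the parent shrinks to [startKey, splitKey) and bumps its epoch version. The following is a minimal, self-contained sketch of that bookkeeping; it deliberately does not use the dingo API, and every name in it is illustrative only.

// Illustrative sketch only (not dingo code): the key-range / epoch arithmetic of a split.
final class RangeSplitSketch {
    static final class Range {
        byte[] startKey;   // inclusive lower bound
        byte[] endKey;     // exclusive upper bound
        long epochVersion; // bumped whenever the range shape changes

        Range(byte[] startKey, byte[] endKey, long epochVersion) {
            this.startKey = startKey;
            this.endKey = endKey;
            this.epochVersion = epochVersion;
        }
    }

    // Split 'parent' at 'splitKey': the child covers [splitKey, endKey), the parent
    // shrinks to [startKey, splitKey) and its epoch version is incremented, mirroring
    // pEpoch.setVersion(version + 1) and pRegion.setEndKey(splitKey) above.
    static Range split(Range parent, byte[] splitKey) {
        Range child = new Range(splitKey, parent.endKey, 1);
        parent.endKey = splitKey;
        parent.epochVersion = parent.epochVersion + 1;
        return child;
    }
}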
use of io.dingodb.store.row.metadata.Region in project dingo by dingodb.
the class StoreEngine method initAllRegionEngine.
private boolean initAllRegionEngine(final StoreEngineOptions opts, final Store store) {
    Requires.requireNonNull(opts, "opts");
    Requires.requireNonNull(store, "store");
    Requires.requireNonNull(opts.getRaftStoreOptions(), "raftDBOptions is Null");
    String baseRaftDataPath = opts.getRaftStoreOptions().getDataPath();
    if (baseRaftDataPath != null && Strings.isNotBlank(baseRaftDataPath)) {
        try {
            FileUtils.forceMkdir(new File(baseRaftDataPath));
        } catch (final Throwable t) {
            LOG.error("Fail to make dir for raftDataPath: {}.", baseRaftDataPath);
            return false;
        }
    } else {
        LOG.error("Init Region found region raft path is empty. store:{}, raftStoreOpt:{}", store.getId(), opts.getRaftStoreOptions());
        return false;
    }
    final Endpoint serverAddress = opts.getServerAddress();
    final List<RegionEngineOptions> rOptsList = opts.getRegionEngineOptionsList();
    final List<Region> regionList = store.getRegions();
    Requires.requireTrue(rOptsList.size() == regionList.size());
    for (int i = 0; i < rOptsList.size(); i++) {
        final RegionEngineOptions rOpts = rOptsList.get(i);
        boolean isOK = inConfiguration(rOpts.getServerAddress().toString(), rOpts.getInitialServerList());
        if (!isOK) {
            LOG.warn("Invalid serverAddress:{} not in initialServerList:{}, whole options:{}", rOpts.getServerAddress(), rOpts.getInitialServerList(), rOpts);
            continue;
        }
        final Region region = regionList.get(i);
        if (Strings.isBlank(rOpts.getRaftDataPath())) {
            final String raftDataPath = JRaftHelper.getRaftDataPath(baseRaftDataPath, region.getId(), serverAddress.getPort());
            rOpts.setRaftDataPath(raftDataPath);
        }
        Requires.requireNonNull(region.getRegionEpoch(), "regionEpoch");
        final RegionEngine engine = new RegionEngine(region, this);
        if (engine.init(rOpts)) {
            final RegionKVService regionKVService = new DefaultRegionKVService(engine);
            registerRegionKVService(regionKVService);
            this.regionEngineTable.put(region.getId(), engine);
        } else {
            LOG.error("Fail to init [RegionEngine: {}].", region);
            return false;
        }
    }
    return true;
}
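
The loop above skips any region whose configured server address does not appear in its initial server list. The helper below is a minimal sketch of such a membership check, written here only to make the intent concrete; it is an assumption about what inConfiguration verifies, not dingo's actual implementation.

// Hedged sketch, not the dingo inConfiguration(...) implementation: checks whether a
// server address appears in a comma-separated initial server list such as
// "127.0.0.1:8081,127.0.0.1:8082,127.0.0.1:8083".
static boolean inInitialServerList(final String serverAddress, final String initialServerList) {
    if (serverAddress == null || initialServerList == null) {
        return false;
    }
    for (final String peer : initialServerList.split(",")) {
        if (serverAddress.equals(peer.trim())) {
            return true;
        }
    }
    return false;
}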
use of io.dingodb.store.row.metadata.Region in project dingo by dingodb.
the class DefaultDingoRowStore method internalExecute.
private void internalExecute(final String regionId, final NodeExecutor executor,
                             final CompletableFuture<Boolean> future, final int retriesLeft, final Errors lastCause) {
    final Region region = this.pdClient.getRegionById(regionId);
    final RegionEngine regionEngine = getRegionEngine(region.getId(), true);
    final RetryRunner retryRunner = retryCause -> internalExecute(regionId, executor, future, retriesLeft - 1, retryCause);
    final FailoverClosure<Boolean> closure = new FailoverClosureImpl<>(future, retriesLeft, retryRunner);
    if (regionEngine != null) {
        if (ensureOnValidEpoch(region, regionEngine, closure)) {
            getRawKVStore(regionEngine).execute(executor, true, closure);
        }
    } else {
        final NodeExecuteRequest request = new NodeExecuteRequest();
        request.setNodeExecutor(executor);
        request.setRegionId(region.getId());
        request.setRegionEpoch(region.getRegionEpoch());
        this.dingoRowStoreRpcService.callAsyncWithRpc(request, closure, lastCause);
    }
}
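
internalExecute shows the failover shape shared by the internal* methods below: resolve the region, prefer the local RegionEngine when the epoch is valid, fall back to RPC otherwise, and let the RetryRunner re-enter the method with retriesLeft - 1 on a retriable error. The sketch below only illustrates that retry shape under assumed names; it is not the dingo FailoverClosureImpl.

// Illustrative sketch of retry with a shrinking budget around a CompletableFuture.
// All names are hypothetical; the real code delegates this to its failover closure.
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;

final class FailoverSketch {
    static <T> void executeWithRetry(final Callable<T> attempt,
                                     final CompletableFuture<T> future,
                                     final int retriesLeft) {
        try {
            future.complete(attempt.call());
        } catch (final Exception e) {
            if (retriesLeft > 0) {
                // Re-enter with a decremented budget, mirroring 'retriesLeft - 1' above.
                executeWithRetry(attempt, future, retriesLeft - 1);
            } else {
                future.completeExceptionally(e);
            }
        }
    }
}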
use of io.dingodb.store.row.metadata.Region in project dingo by dingodb.
the class DefaultDingoRowStore method internalReleaseLockWith.
private void internalReleaseLockWith(final byte[] key, final DistributedLock.Acquirer acquirer,
                                     final CompletableFuture<DistributedLock.Owner> future,
                                     final int retriesLeft, final Errors lastCause) {
    final Region region = this.pdClient.findRegionByKey(key, ErrorsHelper.isInvalidEpoch(lastCause));
    final RegionEngine regionEngine = getRegionEngine(region.getId(), true);
    final RetryRunner retryRunner = retryCause -> internalReleaseLockWith(key, acquirer, future, retriesLeft - 1, retryCause);
    final FailoverClosure<DistributedLock.Owner> closure = new FailoverClosureImpl<>(future, retriesLeft, retryRunner);
    if (regionEngine != null) {
        if (ensureOnValidEpoch(region, regionEngine, closure)) {
            getRawKVStore(regionEngine).releaseLockWith(key, acquirer, closure);
        }
    } else {
        final KeyUnlockRequest request = new KeyUnlockRequest();
        request.setKey(key);
        request.setAcquirer(acquirer);
        request.setRegionId(region.getId());
        request.setRegionEpoch(region.getRegionEpoch());
        this.dingoRowStoreRpcService.callAsyncWithRpc(request, closure, lastCause);
    }
}
use of io.dingodb.store.row.metadata.Region in project dingo by dingodb.
the class DefaultDingoRowStore method internalGetSequence.
private void internalGetSequence(final byte[] seqKey, final int step, final CompletableFuture<Sequence> future,
                                 final int retriesLeft, final Errors lastCause) {
    final Region region = this.pdClient.findRegionByKey(seqKey, ErrorsHelper.isInvalidEpoch(lastCause));
    final RegionEngine regionEngine = getRegionEngine(region.getId(), true);
    final RetryRunner retryRunner = retryCause -> internalGetSequence(seqKey, step, future, retriesLeft - 1, retryCause);
    final FailoverClosure<Sequence> closure = new FailoverClosureImpl<>(future, retriesLeft, retryRunner);
    if (regionEngine != null) {
        if (ensureOnValidEpoch(region, regionEngine, closure)) {
            getRawKVStore(regionEngine).getSequence(seqKey, step, closure);
        }
    } else {
        final GetSequenceRequest request = new GetSequenceRequest();
        request.setSeqKey(seqKey);
        request.setStep(step);
        request.setRegionId(region.getId());
        request.setRegionEpoch(region.getRegionEpoch());
        this.dingoRowStoreRpcService.callAsyncWithRpc(request, closure, lastCause);
    }
}
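
Each of these internal* methods guards the local fast path with ensureOnValidEpoch(region, regionEngine, closure) before touching the underlying store. The sketch below only illustrates the idea of comparing the epoch a request was routed with against the epoch the local engine currently holds; the field names and the retry advice in the comments are assumptions, not the dingo implementation.

// Hedged sketch of the epoch-check idea: a request routed with a stale region epoch
// must not run against a region whose shape has changed (for example after the split
// shown in doSplit above). Names and types are illustrative only.
final class EpochCheckSketch {
    static boolean epochsMatch(final long routedConfVer, final long routedVersion,
                               final long currentConfVer, final long currentVersion) {
        return routedConfVer == currentConfVer && routedVersion == currentVersion;
    }

    static boolean ensureValid(final long routedConfVer, final long routedVersion,
                               final long currentConfVer, final long currentVersion) {
        if (epochsMatch(routedConfVer, routedVersion, currentConfVer, currentVersion)) {
            return true; // safe to execute against the local engine
        }
        // Stale route: the caller should refresh its region route table and retry.
        return false;
    }
}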