Use of io.dingodb.raft.conf.Configuration in project dingo by dingodb.
Class RegionEngine, method init.
@Override
public synchronized boolean init(final RegionEngineOptions opts) {
    if (this.started) {
        LOG.info("[RegionEngine: {}] already started.", this.region);
        return true;
    }
    this.regionOpts = Requires.requireNonNull(opts, "opts");
    this.fsm = new KVStoreStateMachine(this.region, this.storeEngine);
    this.storeEngine.getStateListenerContainer().addStateListener(this.region.getId(), this);
    // node options
    NodeOptions nodeOpts = opts.getNodeOptions();
    if (nodeOpts == null) {
        nodeOpts = new NodeOptions();
    }
    final long metricsReportPeriod = opts.getMetricsReportPeriod();
    if (metricsReportPeriod > 0) {
        // metricsReportPeriod > 0 means metrics are enabled
        nodeOpts.setEnableMetrics(true);
    }
    final Configuration initialConf = new Configuration();
    if (!initialConf.parse(opts.getInitialServerList())) {
        LOG.error("Fail to parse initial configuration {}.", opts.getInitialServerList());
        return false;
    }
    nodeOpts.setInitialConf(initialConf);
    nodeOpts.setFsm(this.fsm);
    final String raftDataPath = opts.getRaftDataPath();
    try {
        FileUtils.forceMkdir(new File(raftDataPath));
    } catch (final Throwable t) {
        LOG.error("Fail to make dir for raftDataPath {}.", raftDataPath);
        return false;
    }
    if (Strings.isBlank(nodeOpts.getLogUri())) {
        final Path logUri = Paths.get(raftDataPath, "log");
        nodeOpts.setLogUri(logUri.toString());
    }
    if (Strings.isBlank(nodeOpts.getRaftMetaUri())) {
        final Path metaUri = Paths.get(raftDataPath, "meta");
        nodeOpts.setRaftMetaUri(metaUri.toString());
    }
    if (Strings.isBlank(nodeOpts.getSnapshotUri())) {
        final Path snapshotUri = Paths.get(raftDataPath, "snapshot");
        nodeOpts.setSnapshotUri(snapshotUri.toString());
    }
    String storeOptionStr = "Null";
    RaftStoreOptions raftStoreOptions = opts.getRaftStoreOptions();
    if (raftStoreOptions != null) {
        RaftLogStorageOptions raftLogStorageOptions = raftStoreOptions.getRaftLogStorageOptions();
        nodeOpts.setRaftLogStorageOptions(raftLogStorageOptions);
        storeOptionStr = raftStoreOptions.toString();
    }
    LOG.info("[RegionEngine: {}], log uri: {}, raft meta uri: {}, snapshot uri: {}. raftDBOptions:{}",
        this.region, nodeOpts.getLogUri(), nodeOpts.getRaftMetaUri(), nodeOpts.getSnapshotUri(), storeOptionStr);
    final Endpoint serverAddress = opts.getServerAddress();
    final PeerId serverId = new PeerId(serverAddress, 0);
    final RpcServer rpcServer = this.storeEngine.getRpcServer();
    this.raftGroupService = new RaftGroupService(opts.getRaftGroupId(), serverId, nodeOpts, rpcServer, true);
    this.node = this.raftGroupService.start(false);
    RouteTable.getInstance().updateConfiguration(this.raftGroupService.getGroupId(), nodeOpts.getInitialConf());
    if (this.node != null) {
        final RawKVStore rawKVStore = this.storeEngine.getRawKVStore();
        final Executor readIndexExecutor = this.storeEngine.getReadIndexExecutor();
        this.raftRawKVStore = new RaftRawKVStore(this.node, rawKVStore, readIndexExecutor);
        this.metricsRawKVStore = new MetricsRawKVStore(this.region.getId(), this.raftRawKVStore);
        // metrics config
        if (this.regionMetricsReporter == null && metricsReportPeriod > 0) {
            final MetricRegistry metricRegistry = this.node.getNodeMetrics().getMetricRegistry();
            if (metricRegistry != null) {
                final ScheduledExecutorService scheduler = this.storeEngine.getMetricsScheduler();
                // start raft node metrics reporter
                this.regionMetricsReporter = Slf4jReporter.forRegistry(metricRegistry)
                    .prefixedWith("region_" + this.region.getId())
                    .withLoggingLevel(Slf4jReporter.LoggingLevel.INFO)
                    .outputTo(LOG)
                    .scheduleOn(scheduler)
                    .shutdownExecutorOnStop(scheduler != null)
                    .build();
                this.regionMetricsReporter.start(metricsReportPeriod, TimeUnit.SECONDS);
            }
        }
        if (this.storeEngine.getPlacementDriverClient() instanceof RemotePlacementDriverClient) {
            HeartbeatOptions heartbeatOpts = opts.getHeartbeatOptions();
            if (heartbeatOpts == null) {
                heartbeatOpts = new HeartbeatOptions();
            }
            this.heartbeatSender = new RegionHeartbeatSender(this);
            if (!this.heartbeatSender.init(heartbeatOpts)) {
                LOG.error("Fail to init [HeartbeatSender].");
                return false;
            }
        }
        this.started = true;
        LOG.info("[RegionEngine] start successfully: {}.", this);
    }
    return this.started;
}
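The Configuration-specific work in init() is the parse/setInitialConf pair plus the RouteTable registration. Below is a minimal stand-alone sketch of just that step; the three-node server list and the group id are hypothetical and not taken from the project:

    // Parse a (hypothetical) initial server list into a Configuration and hand it to NodeOptions.
    final String initialServerList = "127.0.0.1:8181,127.0.0.1:8182,127.0.0.1:8183"; // hypothetical addresses
    final Configuration initialConf = new Configuration();
    if (!initialConf.parse(initialServerList)) {
        throw new IllegalArgumentException("Fail to parse initial configuration: " + initialServerList);
    }
    final NodeOptions nodeOpts = new NodeOptions();
    nodeOpts.setInitialConf(initialConf);
    // Make the parsed membership visible to callers that route through the RouteTable.
    RouteTable.getInstance().updateConfiguration("demo_region_group", initialConf); // hypothetical group id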
Use of io.dingodb.raft.conf.Configuration in project dingo by dingodb.
Class AbstractPlacementDriverClient, method removeReplica.
@Override
public boolean removeReplica(final String regionId, final Peer peer, final boolean refreshConf) {
    Requires.requireNonNull(peer, "peer");
    Requires.requireNonNull(peer.getEndpoint(), "peer.endpoint");
    final String raftGroupId = JRaftHelper.getJRaftGroupId(this.clusterName, regionId);
    final Configuration conf = RouteTable.getInstance().getConfiguration(raftGroupId);
    final Status status = this.cliService.removePeer(raftGroupId, conf, JRaftHelper.toJRaftPeerId(peer));
    if (status.isOk()) {
        if (refreshConf) {
            refreshRouteConfiguration(regionId);
        }
        return true;
    }
    LOG.error("Fail to [removeReplica], [regionId: {}, peer: {}], status: {}.", regionId, peer, status);
    return false;
}
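Here the Configuration is only looked up from the RouteTable and passed to the CliService so it knows the group's current membership. A minimal sketch of the same lookup outside the method, assuming a hypothetical group id and that a cliService instance and a LOG logger are in scope; it removes the first peer listed in the group's current configuration:

    final String raftGroupId = "demo_group"; // hypothetical
    final Configuration conf = RouteTable.getInstance().getConfiguration(raftGroupId);
    final PeerId target = conf.listPeers().get(0); // any peer known to the route table
    final Status status = cliService.removePeer(raftGroupId, conf, target);
    if (!status.isOk()) {
        LOG.error("Fail to remove peer {} from group {}, status: {}.", target, raftGroupId, status);
    }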
Use of io.dingodb.raft.conf.Configuration in project dingo by dingodb.
Class AbstractPlacementDriverClient, method initRouteTableByRegion.
protected void initRouteTableByRegion(final RegionRouteTableOptions opts) {
    final String regionId = Requires.requireNonNull(opts.getRegionId(), "opts.regionId");
    final byte[] startKey = opts.getStartKeyBytes();
    final byte[] endKey = opts.getEndKeyBytes();
    final String initialServerList = opts.getInitialServerList();
    final Region region = new Region();
    final Configuration conf = new Configuration();
    // region
    region.setId(regionId);
    region.setStartKey(startKey);
    region.setEndKey(endKey);
    region.setRegionEpoch(new RegionEpoch(-1, -1));
    // peers
    Requires.requireTrue(Strings.isNotBlank(initialServerList), "opts.initialServerList is blank");
    conf.parse(initialServerList);
    region.setPeers(JRaftHelper.toPeerList(conf.listPeers()));
    // update raft route table
    RouteTable.getInstance().updateConfiguration(JRaftHelper.getJRaftGroupId(clusterName, regionId), conf);
    this.regionRouteTable.addOrUpdateRegion(region);
}
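Seen in isolation, the route-table bootstrap boils down to parsing a server list into a Configuration and publishing it under the region's group id. A sketch under the assumption of a hypothetical server list, a hypothetical group id, and a LOG logger in scope:

    final String initialServerList = "127.0.0.1:9181,127.0.0.1:9182,127.0.0.1:9183"; // hypothetical
    final Configuration conf = new Configuration();
    Requires.requireTrue(conf.parse(initialServerList), "invalid initial server list");
    RouteTable.getInstance().updateConfiguration("demo_cluster_region_1", conf); // hypothetical group id
    for (final PeerId peer : conf.listPeers()) {
        LOG.info("Region peer: {}.", peer); // the same peers that end up in region.setPeers(...)
    }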
Use of io.dingodb.raft.conf.Configuration in project dingo by dingodb.
Class LogManagerImpl, method appendEntries.
@Override
public void appendEntries(final List<LogEntry> entries, final StableClosure done) {
    Requires.requireNonNull(done, "done");
    if (this.hasError) {
        entries.clear();
        Utils.runClosureInThread(done, new Status(RaftError.EIO, "Corrupted LogStorage"));
        return;
    }
    boolean doUnlock = true;
    this.writeLock.lock();
    try {
        if (!entries.isEmpty() && !checkAndResolveConflict(entries, done)) {
            // If checkAndResolveConflict returns false, done has already been invoked inside it.
            entries.clear();
            return;
        }
        for (int i = 0; i < entries.size(); i++) {
            final LogEntry entry = entries.get(i);
            // Set checksum after checkAndResolveConflict
            if (this.raftOptions.isEnableLogEntryChecksum()) {
                entry.setChecksum(entry.checksum());
            }
            if (entry.getType() == EntryType.ENTRY_TYPE_CONFIGURATION) {
                Configuration oldConf = new Configuration();
                if (entry.getOldPeers() != null) {
                    oldConf = new Configuration(entry.getOldPeers(), entry.getOldLearners());
                }
                final ConfigurationEntry conf = new ConfigurationEntry(entry.getId(),
                    new Configuration(entry.getPeers(), entry.getLearners()), oldConf);
                this.configManager.add(conf);
            }
        }
        if (!entries.isEmpty()) {
            done.setFirstLogIndex(entries.get(0).getId().getIndex());
            this.logsInMemory.addAll(entries);
        }
        done.setEntries(entries);
        // 1. Release the lock.
        doUnlock = false;
        if (!wakeupAllWaiter(this.writeLock)) {
            notifyLastLogIndexListeners();
        }
        // 2. Publish the event to the Disruptor queue.
        final EventTranslator<StableClosureEvent> translator = (event, sequence) -> {
            event.reset();
            event.type = EventType.OTHER;
            event.done = done;
        };
        doPublish(done, translator);
    } finally {
        if (doUnlock) {
            this.writeLock.unlock();
        }
    }
}
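The Configuration handling sits in the configuration-entry branch of the loop. The following is only that branch pulled out for clarity, assuming entry and configManager are in scope as in the method above:

    if (entry.getType() == EntryType.ENTRY_TYPE_CONFIGURATION) {
        Configuration oldConf = new Configuration(); // empty when there is no old/joint configuration
        if (entry.getOldPeers() != null) {
            oldConf = new Configuration(entry.getOldPeers(), entry.getOldLearners());
        }
        final Configuration newConf = new Configuration(entry.getPeers(), entry.getLearners());
        configManager.add(new ConfigurationEntry(entry.getId(), newConf, oldConf));
    }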
Use of io.dingodb.raft.conf.Configuration in project dingo by dingodb.
Class RocksDBLogStorage, method load.
private void load(final ConfigurationManager confManager) {
    checkState();
    try (final RocksIterator it = this.db.newIterator(this.confHandle, this.totalOrderReadOptions)) {
        it.seekToFirst();
        while (it.isValid()) {
            final byte[] ks = it.key();
            final byte[] bs = it.value();
            // LogEntry index
            if (ks.length == 8) {
                final LogEntry entry = this.logEntryDecoder.decode(bs);
                if (entry != null) {
                    if (entry.getType() == EntryType.ENTRY_TYPE_CONFIGURATION) {
                        final ConfigurationEntry confEntry = new ConfigurationEntry();
                        confEntry.setId(new LogId(entry.getId().getIndex(), entry.getId().getTerm()));
                        confEntry.setConf(new Configuration(entry.getPeers(), entry.getLearners()));
                        if (entry.getOldPeers() != null) {
                            confEntry.setOldConf(new Configuration(entry.getOldPeers(), entry.getOldLearners()));
                        }
                        if (confManager != null) {
                            confManager.add(confEntry);
                        }
                    }
                } else {
                    LOG.warn("Fail to decode conf entry at index {}, the log data is: {}.",
                        Bits.getLong(ks, 0), BytesUtil.toHex(bs));
                }
            } else {
                if (Arrays.equals(FIRST_LOG_IDX_KEY, ks)) {
                    setFirstLogIndex(Bits.getLong(bs, 0));
                    truncatePrefixInBackground(0L, this.firstLogIndex);
                } else {
                    LOG.warn("Unknown entry in configuration storage key={}, value={}.",
                        BytesUtil.toHex(ks), BytesUtil.toHex(bs));
                }
            }
            it.next();
        }
    }
}
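The per-record Configuration work in load(), pulled out as a sketch; ks, bs, logEntryDecoder, and confManager are assumed to be in scope exactly as in the method above:

    // 8-byte keys are log indexes; their values decode to LogEntry instances.
    if (ks.length == 8) {
        final LogEntry entry = logEntryDecoder.decode(bs);
        if (entry != null && entry.getType() == EntryType.ENTRY_TYPE_CONFIGURATION) {
            final ConfigurationEntry confEntry = new ConfigurationEntry();
            confEntry.setId(new LogId(entry.getId().getIndex(), entry.getId().getTerm()));
            confEntry.setConf(new Configuration(entry.getPeers(), entry.getLearners()));
            if (entry.getOldPeers() != null) {
                confEntry.setOldConf(new Configuration(entry.getOldPeers(), entry.getOldLearners()));
            }
            confManager.add(confEntry);
        }
    }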