Use of io.dingodb.raft.entity.PeerId in project dingo by dingodb.
Class FSMCallerImpl, method doSnapshotSave.
private void doSnapshotSave(final SaveSnapshotClosure done) {
    Requires.requireNonNull(done, "SaveSnapshotClosure is null");
    final long lastAppliedIndex = this.lastAppliedIndex.get();
    final RaftOutter.SnapshotMeta.Builder metaBuilder = RaftOutter.SnapshotMeta.newBuilder()
        .setLastIncludedIndex(lastAppliedIndex)
        .setLastIncludedTerm(this.lastAppliedTerm);
    final ConfigurationEntry confEntry = this.logManager.getConfiguration(lastAppliedIndex);
    if (confEntry == null || confEntry.isEmpty()) {
        LOG.error("Empty conf entry for lastAppliedIndex={}", lastAppliedIndex);
        Utils.runClosureInThread(done,
            new Status(RaftError.EINVAL, "Empty conf entry for lastAppliedIndex=%s", lastAppliedIndex));
        return;
    }
    for (final PeerId peer : confEntry.getConf()) {
        metaBuilder.addPeers(peer.toString());
    }
    for (final PeerId peer : confEntry.getConf().getLearners()) {
        metaBuilder.addLearners(peer.toString());
    }
    if (confEntry.getOldConf() != null) {
        for (final PeerId peer : confEntry.getOldConf()) {
            metaBuilder.addOldPeers(peer.toString());
        }
        for (final PeerId peer : confEntry.getOldConf().getLearners()) {
            metaBuilder.addOldLearners(peer.toString());
        }
    }
    final SnapshotWriter writer = done.start(metaBuilder.build());
    if (writer == null) {
        done.run(new Status(RaftError.EINVAL, "snapshot_storage create SnapshotWriter failed"));
        return;
    }
    this.fsm.onSnapshotSave(writer, done);
}
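The snapshot metadata above serializes every PeerId with toString(), and that string form is what gets parsed back when the snapshot is loaded. A minimal round-trip sketch, assuming the dingo fork keeps the upstream jraft PeerId contract (no-arg constructor, parse(String) returning a boolean, host:port string form); the address is a placeholder:

import io.dingodb.raft.entity.PeerId;

public class PeerIdRoundTripExample {
    public static void main(String[] args) {
        // Placeholder address; any host:port reachable by the group would do.
        final PeerId peer = new PeerId();
        if (!peer.parse("127.0.0.1:8081")) {
            throw new IllegalArgumentException("Bad peer string");
        }
        // toString() produces the same host:port form that doSnapshotSave
        // writes into SnapshotMeta via metaBuilder.addPeers(peer.toString()).
        final String serialized = peer.toString();

        // Parsing the serialized form back yields an equal PeerId.
        final PeerId restored = new PeerId();
        restored.parse(serialized);
        System.out.println(restored.equals(peer)); // true
    }
}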
Use of io.dingodb.raft.entity.PeerId in project dingo by dingodb.
Class NodeImpl, method removeLearners.
@Override
public void removeLearners(final List<PeerId> learners, final Closure done) {
    checkPeers(learners);
    this.writeLock.lock();
    try {
        final Configuration newConf = new Configuration(this.conf.getConf());
        for (final PeerId peer : learners) {
            newConf.removeLearner(peer);
        }
        unsafeRegisterConfChange(this.conf.getConf(), newConf, done);
    } finally {
        this.writeLock.unlock();
    }
}
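From the caller's side this is reached through the Node interface. A hedged usage sketch, assuming dingo's Node keeps jraft's removeLearners(List<PeerId>, Closure) signature; both the node variable and the learner address are placeholders:

import java.util.Collections;
import java.util.List;

import io.dingodb.raft.Node;
import io.dingodb.raft.Status;
import io.dingodb.raft.entity.PeerId;

public class RemoveLearnersExample {
    // `node` is assumed to be an already-started leader; obtaining it is out of scope here.
    static void removeOneLearner(final Node node) {
        final PeerId learner = new PeerId();
        learner.parse("127.0.0.1:8082");
        final List<PeerId> learners = Collections.singletonList(learner);

        // The closure is invoked once the configuration change completes or fails.
        node.removeLearners(learners, (Status status) -> {
            if (status.isOk()) {
                System.out.println("Learner removed");
            } else {
                System.err.println("removeLearners failed: " + status);
            }
        });
    }
}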
Use of io.dingodb.raft.entity.PeerId in project dingo by dingodb.
Class NodeImpl, method transferLeadershipTo.
@Override
public Status transferLeadershipTo(final PeerId peer) {
    Requires.requireNonNull(peer, "Null peer");
    this.writeLock.lock();
    try {
        if (this.state != State.STATE_LEADER) {
            LOG.warn("Node {} can't transfer leadership to peer {} as it is in state {}.", getNodeId(), peer, this.state);
            return new Status(this.state == State.STATE_TRANSFERRING ? RaftError.EBUSY : RaftError.EPERM, "Not a leader");
        }
        if (this.confCtx.isBusy()) {
            // It is very messy to deal with the case where the |peer| receives a
            // TimeoutNowRequest and increases its term while another leader that
            // was not replicated with the newest configuration has somehow been
            // elected. If no add_peer with this very |peer| is ever invoked
            // afterwards, and the peer is not killed, it will spin in the voting
            // procedure and make each new leader step down once the peer reaches
            // its vote timeout and starts to vote (because it will increase the
            // term of the group).
            // To keep things simple, refuse the operation and force users to
            // invoke transfer_leadership_to after the configuration change has
            // completed, so that the peer's configuration is up-to-date when it
            // receives the TimeoutNowRequest.
            LOG.warn("Node {} refused to transfer leadership to peer {} when the leader is changing the configuration.", getNodeId(), peer);
            return new Status(RaftError.EBUSY, "Changing the configuration");
        }
        PeerId peerId = peer.copy();
        // If peer is ANY_PEER, the next candidate (by last_log_id) will be selected.
        if (peerId.equals(PeerId.ANY_PEER)) {
            LOG.info("Node {} starts to transfer leadership to any peer.", getNodeId());
            if ((peerId = this.replicatorGroup.findTheNextCandidate(this.conf)) == null) {
                return new Status(-1, "Candidate not found for any peer");
            }
        }
        if (peerId.equals(this.serverId)) {
            LOG.info("Node {} transferred leadership to self.", this.serverId);
            return Status.OK();
        }
        if (!this.conf.contains(peerId)) {
            LOG.info("Node {} refused to transfer leadership to peer {} as it is not in {}.", getNodeId(), peer, this.conf);
            return new Status(RaftError.EINVAL, "Not in current configuration");
        }
        final long lastLogIndex = this.logManager.getLastLogIndex();
        if (!this.replicatorGroup.transferLeadershipTo(peerId, lastLogIndex)) {
            LOG.warn("No such peer {}.", peer);
            return new Status(RaftError.EINVAL, "No such peer %s", peer);
        }
        this.state = State.STATE_TRANSFERRING;
        final Status status = new Status(RaftError.ETRANSFERLEADERSHIP,
            "Raft leader is transferring leadership to %s", peerId);
        onLeaderStop(status);
        LOG.info("Node {} starts to transfer leadership to peer {}.", getNodeId(), peer);
        final StopTransferArg stopArg = new StopTransferArg(this, this.currTerm, peerId);
        this.stopTransferArg = stopArg;
        this.transferTimer = this.timerManager.schedule(() -> onTransferTimeout(stopArg),
            this.options.getElectionTimeoutMs(), TimeUnit.MILLISECONDS);
    } finally {
        this.writeLock.unlock();
    }
    return Status.OK();
}
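Callers invoke this via Node.transferLeadershipTo and inspect the returned Status; passing PeerId.ANY_PEER takes the "any peer" branch above and lets the leader pick the next candidate itself. A hedged sketch under the same assumption that dingo's Node mirrors the upstream jraft interface (the target address is a placeholder):

import io.dingodb.raft.Node;
import io.dingodb.raft.Status;
import io.dingodb.raft.entity.PeerId;

public class TransferLeadershipExample {
    static void transfer(final Node leader) {
        // Transfer to an explicit peer.
        final PeerId target = new PeerId();
        target.parse("127.0.0.1:8083");
        Status status = leader.transferLeadershipTo(target);
        if (!status.isOk()) {
            System.err.println("Transfer refused: " + status);
        }

        // Or let the leader choose the next candidate on its own.
        status = leader.transferLeadershipTo(PeerId.ANY_PEER);
        System.out.println("Transfer to any peer: " + status);
    }
}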
Use of io.dingodb.raft.entity.PeerId in project dingo by dingodb.
Class NodeImpl, method getAliveNodes.
// in read_lock
private List<PeerId> getAliveNodes(final Collection<PeerId> peers, final long monotonicNowMs) {
    final int leaderLeaseTimeoutMs = this.options.getLeaderLeaseTimeoutMs();
    final List<PeerId> alivePeers = new ArrayList<>();
    for (final PeerId peer : peers) {
        if (peer.equals(this.serverId)) {
            alivePeers.add(peer.copy());
            continue;
        }
        if (monotonicNowMs - this.replicatorGroup.getLastRpcSendTimestamp(peer) <= leaderLeaseTimeoutMs) {
            alivePeers.add(peer.copy());
        }
    }
    return alivePeers;
}
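The liveness test here is a lease check: a peer counts as alive while the last successful RPC to it is no older than the leader lease timeout. A standalone, illustrative restatement of that predicate (not dingo code; names and numbers are made up for the example):

public class LeaseCheckExample {
    // A peer is considered alive if an RPC to it succeeded within the leader lease window.
    static boolean isAlive(final long monotonicNowMs,
                           final long lastRpcSendTimestampMs,
                           final int leaderLeaseTimeoutMs) {
        return monotonicNowMs - lastRpcSendTimestampMs <= leaderLeaseTimeoutMs;
    }

    public static void main(String[] args) {
        // With a 9s lease, a peer last reached 4s ago is alive; 12s ago is not.
        System.out.println(isAlive(20_000, 16_000, 9_000)); // true
        System.out.println(isAlive(20_000, 8_000, 9_000));  // false
    }
}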
Use of io.dingodb.raft.entity.PeerId in project dingo by dingodb.
Class NodeImpl, method checkDeadNodes0.
private boolean checkDeadNodes0(final List<PeerId> peers, final long monotonicNowMs,
                                final boolean checkReplicator, final Configuration deadNodes) {
    final int leaderLeaseTimeoutMs = this.options.getLeaderLeaseTimeoutMs();
    int aliveCount = 0;
    long startLease = Long.MAX_VALUE;
    for (final PeerId peer : peers) {
        if (peer.equals(this.serverId)) {
            aliveCount++;
            continue;
        }
        if (checkReplicator) {
            checkReplicator(peer);
        }
        final long lastRpcSendTimestamp = this.replicatorGroup.getLastRpcSendTimestamp(peer);
        if (monotonicNowMs - lastRpcSendTimestamp <= leaderLeaseTimeoutMs) {
            aliveCount++;
            if (startLease > lastRpcSendTimestamp) {
                startLease = lastRpcSendTimestamp;
            }
            continue;
        }
        if (deadNodes != null) {
            deadNodes.addPeer(peer);
        }
    }
    if (aliveCount >= peers.size() / 2 + 1) {
        updateLastLeaderTimestamp(startLease);
        return true;
    }
    return false;
}
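The final test is a plain majority check: with n peers in the configuration, quorum is n / 2 + 1 under integer division, so a 5-node group needs 3 alive peers and a 4-node group also needs 3. A standalone illustrative sketch of that rule (not dingo code):

public class QuorumExample {
    static boolean hasQuorum(final int aliveCount, final int peerCount) {
        // Same rule as checkDeadNodes0: a strict majority of the configuration.
        return aliveCount >= peerCount / 2 + 1;
    }

    public static void main(String[] args) {
        System.out.println(hasQuorum(3, 5)); // true: quorum of 5 is 3
        System.out.println(hasQuorum(2, 4)); // false: quorum of 4 is 3
    }
}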