Use of org.apache.ignite.raft.jraft.conf.Configuration in project ignite-3 by apache.
The class LogManagerImpl, method confFromMeta.
private Configuration confFromMeta(final SnapshotMeta meta) {
    final Configuration conf = new Configuration();
    if (meta.peersList() != null) {
        for (String metaPeer : meta.peersList()) {
            final PeerId peer = new PeerId();
            peer.parse(metaPeer);
            conf.addPeer(peer);
        }
    }
    if (meta.learnersList() != null) {
        for (String learner : meta.learnersList()) {
            final PeerId peer = new PeerId();
            peer.parse(learner);
            conf.addLearner(peer);
        }
    }
    return conf;
}
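For context, the parse-and-add pattern above is all it takes to build a Configuration by hand. Below is a minimal sketch (not from the ignite-3 sources; the helper name and peer strings are made up) that also checks the boolean result of PeerId.parse, which confFromMeta ignores:

private static Configuration confFromStrings(List<String> peerStrings) {
    Configuration conf = new Configuration();
    for (String s : peerStrings) {
        PeerId peer = new PeerId();
        // parse() returns false on a malformed string, so bad input can be rejected early.
        if (!peer.parse(s)) {
            throw new IllegalArgumentException("Fail to parse peer id: " + s);
        }
        conf.addPeer(peer);
    }
    return conf;
}

A call such as confFromStrings(List.of("127.0.0.1:5001", "127.0.0.1:5002")) yields a two-peer Configuration; confFromMeta follows the same shape but does not check the parse result.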
Use of org.apache.ignite.raft.jraft.conf.Configuration in project ignite-3 by apache.
The class LogManagerImpl, method appendEntries.
@Override
public void appendEntries(final List<LogEntry> entries, final StableClosure done) {
    Requires.requireNonNull(done, "done");
    if (this.hasError) {
        entries.clear();
        Utils.runClosureInThread(nodeOptions.getCommonExecutor(), done,
            new Status(RaftError.EIO, "Corrupted LogStorage"));
        return;
    }
    boolean doUnlock = true;
    this.writeLock.lock();
    try {
        if (!entries.isEmpty() && !checkAndResolveConflict(entries, done)) {
            // If checkAndResolveConflict returns false, done will be called inside it.
            entries.clear();
            return;
        }
        for (int i = 0; i < entries.size(); i++) {
            final LogEntry entry = entries.get(i);
            // Set the checksum after checkAndResolveConflict.
            if (this.raftOptions.isEnableLogEntryChecksum()) {
                entry.setChecksum(entry.checksum());
            }
            if (entry.getType() == EntryType.ENTRY_TYPE_CONFIGURATION) {
                Configuration oldConf = new Configuration();
                if (entry.getOldPeers() != null) {
                    oldConf = new Configuration(entry.getOldPeers(), entry.getOldLearners());
                }
                final ConfigurationEntry conf = new ConfigurationEntry(entry.getId(),
                    new Configuration(entry.getPeers(), entry.getLearners()), oldConf);
                this.configManager.add(conf);
            }
        }
        if (!entries.isEmpty()) {
            done.setFirstLogIndex(entries.get(0).getId().getIndex());
            this.logsInMemory.addAll(entries);
        }
        done.setEntries(entries);
        int retryTimes = 0;
        final EventTranslator<StableClosureEvent> translator = (event, sequence) -> {
            event.reset();
            event.groupId = groupId;
            event.type = EventType.OTHER;
            event.done = done;
        };
        // Publish the batch to the disk-write queue, spinning briefly if it is full.
        while (true) {
            if (tryOfferEvent(done, translator)) {
                break;
            } else {
                retryTimes++;
                if (retryTimes > APPEND_LOG_RETRY_TIMES) {
                    reportError(RaftError.EBUSY.getNumber(), "LogManager is busy, disk queue overload.");
                    return;
                }
                ThreadHelper.onSpinWait();
            }
        }
        doUnlock = false;
        if (!wakeupAllWaiter(this.writeLock)) {
            notifyLastLogIndexListeners();
        }
    } finally {
        if (doUnlock) {
            this.writeLock.unlock();
        }
    }
}
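A hedged sketch of the caller side, assuming the ignite-3 fork keeps jraft's LogManager.StableClosure contract (run(Status) fires once the batch is durable or has failed); the batch and logger here are illustrative:

List<LogEntry> batch = new ArrayList<>();
// ... fill batch with entries whose ids were assigned by the leader ...
logManager.appendEntries(batch, new LogManager.StableClosure() {
    @Override
    public void run(Status status) {
        if (!status.isOk()) {
            // E.g. the EIO "Corrupted LogStorage" status from the method above.
            LOG.error("Failed to append entries: {}", status);
        }
    }
});

Note that appendEntries may clear the list it is given (see entries.clear() above), so the caller should not reuse the batch after handing it off.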
Use of org.apache.ignite.raft.jraft.conf.Configuration in project ignite-3 by apache.
The class RocksDBLogStorage, method load.
private void load(final ConfigurationManager confManager) {
    checkState();
    try (final RocksIterator it = this.db.newIterator(this.confHandle, this.totalOrderReadOptions)) {
        it.seekToFirst();
        while (it.isValid()) {
            final byte[] ks = it.key();
            final byte[] bs = it.value();
            // LogEntry index
            if (ks.length == 8) {
                final LogEntry entry = this.logEntryDecoder.decode(bs);
                if (entry != null) {
                    if (entry.getType() == EnumOutter.EntryType.ENTRY_TYPE_CONFIGURATION) {
                        final ConfigurationEntry confEntry = new ConfigurationEntry();
                        confEntry.setId(new LogId(entry.getId().getIndex(), entry.getId().getTerm()));
                        confEntry.setConf(new Configuration(entry.getPeers(), entry.getLearners()));
                        if (entry.getOldPeers() != null) {
                            confEntry.setOldConf(new Configuration(entry.getOldPeers(), entry.getOldLearners()));
                        }
                        if (confManager != null) {
                            confManager.add(confEntry);
                        }
                    }
                } else {
                    LOG.warn("Fail to decode conf entry at index {}, the log data is: {}.",
                        Bits.getLong(ks, 0), BytesUtil.toHex(bs));
                }
            } else {
                if (Arrays.equals(FIRST_LOG_IDX_KEY, ks)) {
                    setFirstLogIndex(Bits.getLong(bs, 0));
                    truncatePrefixInBackground(0L, this.firstLogIndex);
                } else {
                    LOG.warn("Unknown entry in configuration storage key={}, value={}.",
                        BytesUtil.toHex(ks), BytesUtil.toHex(bs));
                }
            }
            it.next();
        }
    }
}
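The ks.length == 8 dispatch relies on this storage's key convention: a log entry is keyed by its index encoded into exactly 8 bytes, while fixed-name keys such as FIRST_LOG_IDX_KEY hold metadata. A small sketch of that convention, assuming the fork's Bits utility pairs the getLong used above with a matching putLong:

byte[] key = new byte[8];
Bits.putLong(key, 0, 42L);           // encode log index 42 as a storage key (assumed API)
long index = Bits.getLong(key, 0);   // decodes back to 42, exactly as load() does
// Any key that is not exactly 8 bytes long is treated as metadata, not a log index.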
Use of org.apache.ignite.raft.jraft.conf.Configuration in project ignite-3 by apache.
The class ChangePeersRequestProcessor, method processRequest0.
@Override
protected Message processRequest0(final CliRequestContext ctx, final ChangePeersRequest request,
        final IgniteCliRpcRequestClosure done) {
    final List<PeerId> oldConf = ctx.node.listPeers();
    final Configuration conf = new Configuration();
    for (final String peerIdStr : request.newPeersList()) {
        final PeerId peer = new PeerId();
        if (peer.parse(peerIdStr)) {
            conf.addPeer(peer);
        } else {
            return RaftRpcFactory.DEFAULT.newResponse(msgFactory(), RaftError.EINVAL,
                "Fail to parse peer id %s", peerIdStr);
        }
    }
    LOG.info("Receive ChangePeersRequest to {} from {}, new conf is {}", ctx.node.getNodeId(),
        done.getRpcCtx().getRemoteAddress(), conf);
    ctx.node.changePeers(conf, status -> {
        if (!status.isOk()) {
            done.run(status);
        } else {
            ChangePeersResponse resp = msgFactory().changePeersResponse()
                .oldPeersList(oldConf.stream().map(Object::toString).collect(toList()))
                .newPeersList(conf.getPeers().stream().map(Object::toString).collect(toList()))
                .build();
            done.sendResponse(resp);
        }
    });
    return null;
}
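The processor is a thin wrapper over Node.changePeers. A hedged sketch of driving the same parse-validate-submit flow directly against a Node (the peer addresses and log messages are made up):

Configuration newConf = new Configuration();
for (String s : List.of("127.0.0.1:5001", "127.0.0.1:5002", "127.0.0.1:5003")) {
    PeerId p = new PeerId();
    if (!p.parse(s)) {
        throw new IllegalArgumentException("Fail to parse peer id: " + s);
    }
    newConf.addPeer(p);
}
// changePeers is asynchronous; the closure runs when the membership change resolves.
node.changePeers(newConf, status -> {
    if (status.isOk()) {
        LOG.info("New conf applied: {}", newConf);
    } else {
        LOG.error("changePeers failed: {}", status);
    }
});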
Use of org.apache.ignite.raft.jraft.conf.Configuration in project ignite-3 by apache.
The class TestCluster, method start.
public boolean start(Endpoint listenAddr, boolean emptyPeers, int snapshotIntervalSecs,
        boolean enableMetrics, SnapshotThrottle snapshotThrottle, RaftOptions raftOptions,
        int priority) throws IOException {
    this.lock.lock();
    try {
        if (this.serverMap.get(listenAddr) != null) {
            return true;
        }
        // Start the node in non-shared pools mode: the pools will be managed by the node itself.
        NodeOptions nodeOptions = new NodeOptions();
        nodeOptions.setServerName(listenAddr.toString());
        nodeOptions.setElectionTimeoutMs(this.electionTimeoutMs);
        nodeOptions.setEnableMetrics(enableMetrics);
        nodeOptions.setSnapshotThrottle(snapshotThrottle);
        nodeOptions.setSnapshotIntervalSecs(snapshotIntervalSecs);
        nodeOptions.setServiceFactory(this.raftServiceFactory);
        if (raftOptions != null) {
            nodeOptions.setRaftOptions(raftOptions);
        }
        String serverDataPath = this.dataPath + File.separator + listenAddr.toString().replace(':', '_');
        new File(serverDataPath).mkdirs();
        nodeOptions.setLogUri(serverDataPath + File.separator + "logs");
        nodeOptions.setRaftMetaUri(serverDataPath + File.separator + "meta");
        nodeOptions.setSnapshotUri(serverDataPath + File.separator + "snapshot");
        nodeOptions.setElectionPriority(priority);
        // Align the rpc options with the election timeout.
        nodeOptions.setRpcConnectTimeoutMs(this.electionTimeoutMs / 3);
        nodeOptions.setRpcDefaultTimeout(this.electionTimeoutMs / 2);
        // Reduce the default thread count per test node.
        nodeOptions.setRaftRpcThreadPoolSize(Utils.cpus());
        nodeOptions.setTimerPoolSize(Utils.cpus() * 2);
        nodeOptions.setRpcProcessorThreadPoolSize(Utils.cpus() * 3);
        nodeOptions.setElectionTimeoutStrategy(new ExponentialBackoffTimeoutStrategy());
        MockStateMachine fsm = new MockStateMachine(listenAddr);
        nodeOptions.setFsm(fsm);
        if (!emptyPeers) {
            nodeOptions.setInitialConf(new Configuration(this.peers, this.learners));
        }
        List<NetworkAddress> addressList = (emptyPeers ? Stream.<PeerId>empty() : peers.stream())
            .map(PeerId::getEndpoint)
            .map(JRaftUtils::addressFromEndpoint)
            .collect(toList());
        NodeManager nodeManager = new NodeManager();
        ClusterService clusterService = ClusterServiceTestUtils.clusterService(testInfo, listenAddr.getPort(),
            new StaticNodeFinder(addressList), new TestScaleCubeClusterServiceFactory());
        var rpcClient = new IgniteRpcClient(clusterService);
        nodeOptions.setRpcClient(rpcClient);
        ExecutorService requestExecutor = JRaftUtils.createRequestExecutor(nodeOptions);
        var rpcServer = new TestIgniteRpcServer(clusterService, nodeManager, nodeOptions, requestExecutor);
        clusterService.start();
        if (optsClo != null) {
            optsClo.accept(nodeOptions);
        }
        RaftGroupService server = new RaftGroupService(this.name, new PeerId(listenAddr, 0, priority),
            nodeOptions, rpcServer, nodeManager) {
            @Override
            public synchronized void shutdown() {
                // This stop order is consistent with JRaftServerImpl.
                rpcServer.shutdown();
                ExecutorServiceHelper.shutdownAndAwaitTermination(requestExecutor);
                super.shutdown();
                // The network service must be stopped after the node, because raft initiates
                // a TimeoutNowRequest on stop for faster leader election.
                clusterService.stop();
            }
        };
        this.serverMap.put(listenAddr, server);
        Node node = server.start();
        this.fsms.put(new PeerId(listenAddr, 0), fsm);
        this.nodes.add((NodeImpl) node);
        return true;
    } finally {
        this.lock.unlock();
    }
}
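Stripped of the test-harness wiring, the bootstrap recipe start() follows comes down to a handful of NodeOptions calls. A minimal sketch using only the setters shown above (the paths, timeout, and peer lists are illustrative):

NodeOptions opts = new NodeOptions();
opts.setElectionTimeoutMs(1000);
opts.setFsm(new MockStateMachine(listenAddr));
opts.setLogUri(dataPath + File.separator + "logs");
opts.setRaftMetaUri(dataPath + File.separator + "meta");
opts.setSnapshotUri(dataPath + File.separator + "snapshot");
// The initial Configuration seeds the group membership for a fresh cluster;
// omit it (as start() does for emptyPeers) when the node will instead be
// added to an existing group via a membership change.
opts.setInitialConf(new Configuration(peers, learners));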