Usage of bftsmart.reconfiguration.views.NodeNetwork in the bftsmart project (blockchain-jd-com):
class DurableStateManager, method SMRequestDeliver.
/**
 * Handles an incoming state-transfer request (SM_REQUEST) from another replica.
 *
 * <p>If state transfer is enabled and a recoverer is available, this replica
 * advertises a dedicated state-serving socket (port {@code 4444 + processId}),
 * spawns a {@link StateSenderServer} on it, and sends an SM_REPLY back to the
 * requester so it can connect and download the state.
 *
 * @param msg   the state request; expected to be a {@link CSTSMMessage}
 * @param isBFT whether the system runs in BFT mode (unused here)
 */
@Override
public void SMRequestDeliver(SMMessage msg, boolean isBFT) {
    LOGGER.debug("(TOMLayer.SMRequestDeliver) invoked method");
    // Guard clause: nothing to do unless state transfer is enabled and a recoverer exists.
    if (!topology.getStaticConf().isStateTransferEnabled() || dt.getRecoverer() == null) {
        return;
    }
    LOGGER.debug("(TOMLayer.SMRequestDeliver) The state transfer protocol is enabled");
    LOGGER.info("(TOMLayer.SMRequestDeliver) I received a state request for CID {} from replica {}",
            msg.getCID(), msg.getSender());
    CSTSMMessage stateRequest = (CSTSMMessage) msg;
    CSTRequestF1 transferConfig = stateRequest.getCstConfig();
    int localId = topology.getStaticConf().getProcessId();
    if (transferConfig.getCheckpointReplica() == localId) {
        LOGGER.info("(TOMLayer.SMRequestDeliver) I should be the one sending the state");
    }
    LOGGER.debug("--- state asked");
    // Advertise our host with the dedicated state-transfer port in the reply config.
    NodeNetwork selfAddress = topology.getCurrentView().getAddress(localId);
    int serverPort = 4444 + localId;
    transferConfig.setAddress(new NodeNetwork(selfAddress.getHost(), serverPort, -1,
            selfAddress.isConsensusSecure(), false));
    CSTSMMessage reply = new CSTSMMessage(localId, msg.getCID(), TOMUtil.SM_REPLY, transferConfig, null,
            topology.getCurrentView(), tomLayer.getSynchronizer().getLCManager().getLastReg(),
            tomLayer.execManager.getCurrentLeader());
    // Start serving the state BEFORE replying, so the requester can connect immediately.
    StateSenderServer stateServer = new StateSenderServer(serverPort);
    stateServer.setRecoverable(dt.getRecoverer());
    stateServer.setRequest(transferConfig);
    new Thread(stateServer).start();
    tomLayer.getCommunication().send(new int[] { msg.getSender() }, reply);
}
Usage of bftsmart.reconfiguration.views.NodeNetwork in the bftsmart project (blockchain-jd-com):
class DurableStateManager, method SMReplyDeliver.
/**
 * Handles a state-transfer reply (SM_REPLY) from another replica.
 *
 * <p>For the CID this replica is waiting on, it (1) tallies the sender's vote for the
 * current regency/leader/view, (2) downloads the actual application state over the
 * dedicated socket advertised in the reply, (3) once replies from all three CST roles
 * (checkpoint, lower log, upper log) have arrived, validates the logs and checkpoint
 * against each other's hashes, and (4) installs the state and resumes normal operation,
 * or retries/aborts on failure.
 *
 * @param msg   the reply; expected to be a {@link CSTSMMessage}
 * @param isBFT whether the system runs in BFT mode (relaxes the proof requirement when false)
 */
@Override
public void SMReplyDeliver(SMMessage msg, boolean isBFT) {
    lockTimer.lock();
    CSTSMMessage reply = (CSTSMMessage) msg;
    if (topology.getStaticConf().isStateTransferEnabled()) {
        LOGGER.debug("(TOMLayer.SMReplyDeliver) The state transfer protocol is enabled");
        LOGGER.info("(TOMLayer.SMReplyDeliver) I received a state reply for CID {} from replica {}", reply.getCID(), reply.getSender());
        LOGGER.info("--- Received CID: {}, Waiting CID: {}", reply.getCID(), waitingCID);
        // Only process replies for the CID we are actively waiting on.
        if (waitingCID != -1 && reply.getCID() == waitingCID) {
            int currentRegency = -1;
            int currentLeader = -1;
            View currentView = null;
            if (!appStateOnly) {
                // Record this sender's vote; adopt a value only once enough
                // matching replies have been received (enough* helpers).
                senderRegencies.put(reply.getSender(), reply.getRegency());
                senderLeaders.put(reply.getSender(), reply.getLeader());
                senderViews.put(reply.getSender(), reply.getView());
                // msg.getState().getCertifiedDecision(SVController));
                if (enoughRegencies(reply.getRegency()))
                    currentRegency = reply.getRegency();
                if (enoughLeaders(reply.getLeader()))
                    currentLeader = reply.getLeader();
                if (enoughViews(reply.getView())) {
                    currentView = reply.getView();
                    if (!currentView.isMember(topology.getStaticConf().getProcessId())) {
                        LOGGER.error("Not a member!");
                    }
                }
                // if (enoughProofs(waitingCID, this.tomLayer.getSynchronizer().getLCManager()))
                // currentProof = msg.getState().getCertifiedDecision(SVController);
            } else {
                // appStateOnly: only the application state is needed; trust the
                // locally known regency, leader and view.
                currentLeader = tomLayer.execManager.getCurrentLeader();
                currentRegency = tomLayer.getSynchronizer().getLCManager().getLastReg();
                currentView = topology.getCurrentView();
            }
            LOGGER.info("(TOMLayer.SMReplyDeliver) The reply is for the CID that I want!");
            // Download the serialized state from the sender's advertised state-server socket.
            // NOTE(review): clientSocket and in are never closed (resource leak), and
            // failures are reported via printStackTrace rather than the logger.
            // NOTE(review): this Java-deserializes data from a remote peer — a known
            // attack surface; consider an ObjectInputFilter.
            NodeNetwork address = reply.getCstConfig().getAddress();
            Socket clientSocket;
            ApplicationState stateReceived = null;
            try {
                clientSocket = new Socket(address.getHost(), address.getConsensusPort());
                ObjectInputStream in = new ObjectInputStream(clientSocket.getInputStream());
                stateReceived = (ApplicationState) in.readObject();
            } catch (UnknownHostException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            } catch (IOException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            } catch (ClassNotFoundException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            // File the received state under the sender's CST role:
            // checkpoint replica, lower-log replica, or upper-log replica.
            if (stateReceived instanceof CSTState) {
                senderStates.put(reply.getSender(), stateReceived);
                if (reply.getSender() == cstRequest.getCheckpointReplica())
                    this.stateCkp = (CSTState) stateReceived;
                if (reply.getSender() == cstRequest.getLogLower())
                    this.stateLower = (CSTState) stateReceived;
                if (reply.getSender() == cstRequest.getLogUpper())
                    this.stateUpper = (CSTState) stateReceived;
            }
            // All three roles have answered: cross-validate logs and checkpoint.
            // NOTE(review): assumes the three roles are distinct replicas — confirm
            // cstRequest never assigns two roles to the same process id.
            if (senderStates.size() == 3) {
                CommandsInfo[] lowerLog = stateLower.getLogLower();
                CommandsInfo[] upperLog = stateUpper.getLogUpper();
                LOGGER.debug("lowerLog ");
                if (lowerLog != null)
                    LOGGER.debug("Lower log length size: {} ", lowerLog.length);
                LOGGER.debug("upperLog ");
                if (upperLog != null)
                    LOGGER.debug("Upper log length size: {} ", upperLog.length);
                boolean haveState = false;
                byte[] lowerbytes = TOMUtil.getBytes(lowerLog);
                LOGGER.debug("Log lower bytes size: {}", lowerbytes.length);
                byte[] upperbytes = TOMUtil.getBytes(upperLog);
                LOGGER.debug("Log upper bytes size: {}", upperbytes.length);
                byte[] lowerLogHash = new byte[0];
                byte[] upperLogHash = new byte[0];
                try {
                    lowerLogHash = TOMUtil.computeHash(lowerbytes);
                    upperLogHash = TOMUtil.computeHash(upperbytes);
                } catch (NoSuchAlgorithmException e) {
                    e.printStackTrace();
                }
                // validate lower log against the checkpoint replica's hash
                if (Arrays.equals(stateCkp.getHashLogLower(), lowerLogHash))
                    haveState = true;
                else
                    LOGGER.error("Lower log don't match");
                // validate upper log against the checkpoint replica's hash
                if (!haveState || !Arrays.equals(stateCkp.getHashLogUpper(), upperLogHash)) {
                    haveState = false;
                    LOGGER.error("Upper log don't match");
                }
                // Combine the checkpoint with the lower log so the recoverer can
                // replay up to the checkpoint CID and expose a verifiable state hash.
                CSTState statePlusLower = new CSTState(stateCkp.getSerializedState(),
                        TOMUtil.getBytes(stateCkp.getSerializedState()),
                        stateLower.getLogLower(), stateCkp.getHashLogLower(), null, null,
                        stateCkp.getCheckpointCID(), stateUpper.getCheckpointCID(),
                        topology.getStaticConf().getProcessId());
                if (haveState) {
                    // validate checkpoint: install checkpoint+lowerLog, then compare the
                    // resulting state hash with the upper-log replica's checkpoint hash.
                    LOGGER.debug("validating checkpoint!!!");
                    dt.getRecoverer().setState(statePlusLower);
                    byte[] currentStateHash = ((DurabilityCoordinator) dt.getRecoverer()).getCurrentStateHash();
                    if (!Arrays.equals(currentStateHash, stateUpper.getHashCheckpoint())) {
                        LOGGER.error("ckp hash don't match");
                        haveState = false;
                    }
                }
                LOGGER.info("-- current regency: {} ", currentRegency);
                LOGGER.info("-- current leader: {}", currentLeader);
                LOGGER.info("-- current view: {}", currentView);
                // Install only with agreed regency/leader/view and a validated state
                // (in BFT mode a proof would also be required; currently disabled).
                if (currentRegency > -1 && currentLeader > -1 && currentView != null && haveState && (!isBFT || /* currentProof != null || */
                appStateOnly)) {
                    LOGGER.info("---- RECEIVED VALID STATE ----");
                    LOGGER.info("(TOMLayer.SMReplyDeliver) The state of those replies is good!");
                    LOGGER.info("(TOMLayer.SMReplyDeliver) CID State requested: {}", reply.getCID());
                    LOGGER.info("(TOMLayer.SMReplyDeliver) CID State received: {}", stateUpper.getLastCID());
                    // Jump forward to the agreed regency/leader if we are behind.
                    if (currentRegency > tomLayer.getSynchronizer().getLCManager().getCurrentRegency().getId()) {
                        tomLayer.getSynchronizer().getLCManager().jumpToRegency(new LeaderRegency(currentLeader, currentRegency));
                        tomLayer.execManager.setNewLeader(currentLeader);
                    }
                    // one
                    if (currentRegency > 0)
                        tomLayer.getSynchronizer().removeSTOPretransmissions(currentRegency - 1);
                    LOGGER.debug("trying to acquire deliverlock");
                    dt.deliverLock();
                    LOGGER.debug("acquired");
                    // this makes the isRetrievingState() evaluates to false
                    waitingCID = -1;
                    dt.update(stateUpper);
                    // synchronization phase: re-inject consensus messages that arrived
                    // while execution was stopped and are newer than the installed state.
                    // NOTE(review): 'state' appears to be a field of this class (not
                    // visible in this excerpt) — confirm it is set by this point.
                    if (!appStateOnly && execManager.stopped()) {
                        Queue<ConsensusMessage> stoppedMsgs = execManager.getStoppedMsgs();
                        for (ConsensusMessage stopped : stoppedMsgs) {
                            if (stopped.getNumber() > state.getLastCID())
                                execManager.addOutOfContextMessage(stopped);
                        }
                        execManager.clearStopped();
                        execManager.restart();
                    }
                    LOGGER.info("Processing out of context messages");
                    tomLayer.processOutOfContext();
                    if (topology.getCurrentViewId() != currentView.getId()) {
                        LOGGER.info("Installing current view!");
                        topology.reconfigureTo(currentView);
                    }
                    isInitializing = false;
                    dt.canDeliver();
                    dt.deliverUnlock();
                    reset();
                    LOGGER.info("I updated the state!");
                    // tomLayer.requestsTimer.startTimer();
                    if (stateTimer != null)
                        stateTimer.cancel();
                    if (appStateOnly) {
                        appStateOnly = false;
                        tomLayer.getSynchronizer().resumeLC();
                    }
                } else if (state == null && (topology.getCurrentViewN() / 2) < getReplies()) {
                    // A majority replied but no usable state arrived: give up waiting.
                    LOGGER.error("---- DIDNT RECEIVE STATE ----");
                    LOGGER.error("(TOMLayer.SMReplyDeliver) I have more than {} messages that are no good!", (topology.getCurrentViewN() / 2));
                    waitingCID = -1;
                    reset();
                    if (stateTimer != null)
                        stateTimer.cancel();
                    if (appStateOnly) {
                        requestState();
                    }
                } else if (!haveState) {
                    // Hash mismatch: discard everything and request the state again.
                    LOGGER.error("---- RECEIVED INVALID STATE ----");
                    LOGGER.error("(TOMLayer.SMReplyDeliver) The replica from which I expected the state, sent one which doesn't match the hash of the others, or it never sent it at all");
                    reset();
                    requestState();
                    if (stateTimer != null)
                        stateTimer.cancel();
                }
            }
        }
    }
    lockTimer.unlock();
}
Usage of bftsmart.reconfiguration.views.NodeNetwork in the bftsmart project (blockchain-jd-com):
class ServiceReplica, method receiveMessages.
/**
 * Delivers decided consensus batches to the application executor and sends replies
 * back to clients.
 *
 * <p>For each consensus instance: ordered/reconfig requests are either batched for a
 * {@code PreComputeBatchExecutable}, or executed immediately for {@code FIFOExecutable}
 * / {@code SingleExecutable}. A consensus with no deliverable command is reported to
 * the recoverer as a no-op. For batch executors, replies are built from
 * {@code asyncResponseLinkedList}; clients with a stale view additionally receive the
 * current view in the reply.
 *
 * @param consId                  consensus id per decided instance
 * @param regencies               regency per decided instance
 * @param leaders                 leader per decided instance
 * @param cDecs                   certified decision (proof) per decided instance
 * @param requests                decided request batches, one array per instance
 * @param asyncResponseLinkedList precomputed responses, parallel to the batched requests
 * @param isRollback              when true, skips executeBatch (rollback path)
 */
public void receiveMessages(int[] consId, int[] regencies, int[] leaders, CertifiedDecision[] cDecs, TOMMessage[][] requests, List<byte[]> asyncResponseLinkedList, boolean isRollback) {
    int numRequests = 0;
    int consensusCount = 0;
    List<TOMMessage> toBatch = new ArrayList<>();
    List<MessageContext> msgCtxts = new ArrayList<>();
    boolean noop = true;
    for (TOMMessage[] requestsFromConsensus : requests) {
        TOMMessage firstRequest = requestsFromConsensus[0];
        int requestCount = 0;
        noop = true;
        for (TOMMessage request : requestsFromConsensus) {
            LOGGER.info("(ServiceReplica.receiveMessages) request view id = {}, curr view id = {}, request type = {}", request.getViewID(), serverViewController.getCurrentViewId(), request.getReqType());
            // There is no inter-node view-ID synchronization process yet; when handling
            // view-updating operations such as RECONFIG, the case of a request carrying
            // a lagging view ID is not considered for now.
            if (request.getViewID() <= serverViewController.getCurrentViewId()) {
                if (request.getReqType() == TOMMessageType.ORDERED_REQUEST || request.getReqType() == TOMMessageType.RECONFIG) {
                    noop = false;
                    numRequests++;
                    MessageContext msgCtx = new MessageContext(request.getSender(), request.getViewID(), request.getReqType(), request.getSession(), request.getSequence(), request.getOperationId(), request.getReplyServer(), request.serializedMessageSignature, firstRequest.timestamp, request.numOfNonces, request.seed, regencies[consensusCount], leaders[consensusCount], consId[consensusCount], cDecs[consensusCount].getConsMessages(), firstRequest, false);
                    if (requestCount + 1 == requestsFromConsensus.length) {
                        msgCtx.setLastInBatch();
                    }
                    request.deliveryTime = System.nanoTime();
                    if (executor instanceof PreComputeBatchExecutable) {
                        // Batch executor: defer execution — collect request+context now,
                        // execute the whole batch after all instances are scanned.
                        LOGGER.debug("(ServiceReplica.receiveMessages) Batching request from {}", request.getSender());
                        // This is used to deliver the content decided by a consensus instance directly
                        // to a Recoverable object. It is useful to allow the application to create a log
                        // and store the proof associated with decisions (which are needed by replicas
                        // that are asking for a state transfer).
                        // if (this.recoverer != null)
                        // this.recoverer.Op(msgCtx.getConsensusId(), request.getContent(), msgCtx);
                        // deliver requests and contexts to the executor later
                        msgCtxts.add(msgCtx);
                        toBatch.add(request);
                        if (request.getReqType() == TOMMessageType.RECONFIG) {
                            serverViewController.enqueueUpdate(request);
                        }
                    } else if (executor instanceof FIFOExecutable) {
                        // FIFO executor: execute immediately, preserving per-client order.
                        LOGGER.debug("(ServiceReplica.receiveMessages) Delivering request from {} via FifoExecutable", request.getSender());
                        // Deliver the raw decision to the recoverer first (needed by replicas
                        // that are asking for a state transfer).
                        if (this.recoverer != null)
                            this.recoverer.Op(msgCtx.getConsensusId(), request.getContent(), msgCtx);
                        // This is used to deliver the requests to the application and obtain a reply
                        // to deliver to the clients. The raw decision is passed to the application
                        // in the line above.
                        byte[] response = ((FIFOExecutable) executor).executeOrderedFIFO(request.getContent(), msgCtx, request.getSender(), request.getOperationId());
                        // Generate the messages to send back to the clients
                        request.reply = new TOMMessage(id, request.getSession(), request.getSequence(), request.getOperationId(), response, null, serverViewController.getCurrentViewId(), request.getReqType());
                        LOGGER.debug("(ServiceReplica.receiveMessages) sending reply to {}", request.getSender());
                        replier.manageReply(request, msgCtx);
                    } else if (executor instanceof SingleExecutable) {
                        // Single executor: execute immediately, one request at a time.
                        LOGGER.debug("(ServiceReplica.receiveMessages) Delivering request from {} via SingleExecutable", request.getSender());
                        // Deliver the raw decision to the recoverer first (needed by replicas
                        // that are asking for a state transfer).
                        if (this.recoverer != null)
                            this.recoverer.Op(msgCtx.getConsensusId(), request.getContent(), msgCtx);
                        // This is used to deliver the requests to the application and obtain a reply
                        // to deliver to the clients. The raw decision is passed to the application
                        // in the line above.
                        byte[] response = ((SingleExecutable) executor).executeOrdered(request.getContent(), msgCtx);
                        // Generate the messages to send back to the clients
                        request.reply = new TOMMessage(id, request.getSession(), request.getSequence(), request.getOperationId(), response, null, serverViewController.getCurrentViewId(), request.getReqType());
                        LOGGER.debug("(ServiceReplica.receiveMessages) sending reply to {}", request.getSender());
                        replier.manageReply(request, msgCtx);
                    } else {
                        throw new UnsupportedOperationException("Non-existent interface");
                    }
                } else {
                    // Any other request type should never reach a decided batch.
                    throw new RuntimeException("Should never reach here!");
                }
            }
            // (A disabled branch that resent the current view to senders with an old
            // view ID used to live here; the live equivalent is now in the reply loop
            // below, guarded by request.getViewID() < currentViewId.)
            requestCount++;
        }
        // No deliverable command in this consensus instance — notify the recoverer so
        // it can still log the (blank) consensus; hence the invocation of "noop".
        if (noop && this.recoverer != null) {
            LOGGER.debug("(ServiceReplica.receiveMessages) I am proc {}, host = {}, port = {}. Delivering a no-op to the recoverer", this.serverViewController.getStaticConf().getProcessId(), this.serverViewController.getStaticConf().getRemoteAddress(this.serverViewController.getStaticConf().getProcessId()).getHost(), this.serverViewController.getStaticConf().getRemoteAddress(this.serverViewController.getStaticConf().getProcessId()).getConsensusPort());
            LOGGER.debug("I am proc {} , host = {}, port = {}.--- A consensus instance finished, but there were no commands to deliver to the application.", this.serverViewController.getStaticConf().getProcessId(), this.serverViewController.getStaticConf().getRemoteAddress(this.serverViewController.getStaticConf().getProcessId()).getHost(), this.serverViewController.getStaticConf().getRemoteAddress(this.serverViewController.getStaticConf().getProcessId()).getConsensusPort());
            LOGGER.debug("I am proc {} , host = {}, port = {}.--- Notifying recoverable about a blank consensus.", this.serverViewController.getStaticConf().getProcessId(), this.serverViewController.getStaticConf().getRemoteAddress(this.serverViewController.getStaticConf().getProcessId()).getHost(), this.serverViewController.getStaticConf().getRemoteAddress(this.serverViewController.getStaticConf().getProcessId()).getConsensusPort());
            byte[][] batch = null;
            MessageContext[] msgCtx = null;
            if (requestsFromConsensus.length > 0) {
                // Make new batch to deliver
                batch = new byte[requestsFromConsensus.length][];
                msgCtx = new MessageContext[requestsFromConsensus.length];
                // Put messages in the batch
                int line = 0;
                for (TOMMessage m : requestsFromConsensus) {
                    batch[line] = m.getContent();
                    msgCtx[line] = new MessageContext(m.getSender(), m.getViewID(), m.getReqType(), m.getSession(), m.getSequence(), m.getOperationId(), m.getReplyServer(), m.serializedMessageSignature, firstRequest.timestamp, m.numOfNonces, m.seed, regencies[consensusCount], leaders[consensusCount], consId[consensusCount], cDecs[consensusCount].getConsMessages(), firstRequest, true);
                    msgCtx[line].setLastInBatch();
                    line++;
                }
            }
            this.recoverer.noOp(consId[consensusCount], batch, msgCtx);
        }
        consensusCount++;
    }
    // Batch-executor path: execute all collected requests at once and reply.
    if (executor instanceof PreComputeBatchExecutable && numRequests > 0) {
        // Make new batch to deliver
        byte[][] batch = new byte[numRequests][];
        // Put messages in the batch
        int line = 0;
        for (TOMMessage m : toBatch) {
            if (m.getReqType() == TOMMessageType.RECONFIG) {
                // For RECONFIG-type messages, the extended info is the actual
                // transaction payload.
                batch[line] = ((ReconfigureRequest) TOMUtil.getObject(m.getContent())).getExtendInfo();
            } else {
                batch[line] = m.getContent();
            }
            line++;
        }
        MessageContext[] msgContexts = new MessageContext[msgCtxts.size()];
        msgContexts = msgCtxts.toArray(msgContexts);
        // Deliver the batch and wait for replies
        // NOTE(review): the executeBatch result is discarded — replies come from
        // asyncResponseLinkedList instead; confirm this is intentional.
        if (isRollback == false) {
            byte[][] replies = ((PreComputeBatchExecutable) executor).executeBatch(batch, msgContexts);
        }
        if (toBatch.size() != asyncResponseLinkedList.size()) {
            LOGGER.debug("(ServiceReplica.receiveMessages) toBatch.size() != asyncResponseLinkedList.size()");
            return;
        }
        // Send the replies back to the client
        for (int index = 0; index < toBatch.size(); index++) {
            TOMMessage request = toBatch.get(index);
            if (request.getReqType() == TOMMessageType.RECONFIG) {
                continue;
            }
            // if request with backward viewid, reply msg will include view content
            if (request.getViewID() < serverViewController.getCurrentViewId()) {
                View view = serverViewController.getCurrentView();
                List<NodeNetwork> addressesTemp = new ArrayList<>();
                for (int i = 0; i < view.getProcesses().length; i++) {
                    int cpuId = view.getProcesses()[i];
                    NodeNetwork inetSocketAddress = view.getAddress(cpuId);
                    if (inetSocketAddress.getHost().equals("0.0.0.0")) {
                        // proc docker env: substitute the externally reachable host
                        // from the outer-host configuration.
                        String host = serverViewController.getStaticConf().getOuterHostConfig().getHost(cpuId);
                        NodeNetwork tempSocketAddress = new NodeNetwork(host, inetSocketAddress.getConsensusPort(), -1, inetSocketAddress.isConsensusSecure(), false);
                        LOGGER.info("I am proc {}, tempSocketAddress.getAddress().getHostAddress() = {}", serverViewController.getStaticConf().getProcessId(), host);
                        addressesTemp.add(tempSocketAddress);
                    } else {
                        LOGGER.info("I am proc {}, tempSocketAddress.getAddress().getHostAddress() = {}", serverViewController.getStaticConf().getProcessId(), inetSocketAddress.toString());
                        addressesTemp.add(new NodeNetwork(inetSocketAddress.getHost(), inetSocketAddress.getConsensusPort(), -1, inetSocketAddress.isConsensusSecure(), false));
                    }
                }
                View replyView = new View(view.getId(), view.getProcesses(), view.getF(), addressesTemp.toArray(new NodeNetwork[addressesTemp.size()]));
                LOGGER.info("I am proc {}, view = {}, reply View = {}", this.serverViewController.getStaticConf().getProcessId(), view, replyView);
                request.reply = new TOMMessage(id, request.getSession(), request.getSequence(), request.getOperationId(), asyncResponseLinkedList.get(index), TOMUtil.getBytes(replyView), serverViewController.getCurrentViewId(), request.getReqType());
            } else {
                request.reply = new TOMMessage(id, request.getSession(), request.getSequence(), request.getOperationId(), asyncResponseLinkedList.get(index), null, serverViewController.getCurrentViewId(), request.getReqType());
            }
            if (serverViewController.getStaticConf().getNumRepliers() > 0) {
                LOGGER.debug("(ServiceReplica.receiveMessages) sending reply to {} with sequence number {} and operation ID {} via ReplyManager", request.getSender(), request.getSequence(), request.getOperationId());
                repMan.send(request);
            } else {
                LOGGER.debug("(ServiceReplica.receiveMessages) sending reply to {} with sequence number {} and operation ID {}", request.getSender(), request.getSequence(), request.getOperationId());
                replier.manageReply(request, msgContexts[index]);
            }
        }
        // DEBUG
        LOGGER.debug("BATCHEXECUTOR END");
    }
    // End of: if (executor instanceof PreComputeBatchExecutable && numRequests > 0)
}
Usage of bftsmart.reconfiguration.views.NodeNetwork in the bftsmart project (blockchain-jd-com):
class ServerViewController, method reconfigure.
/**
 * Builds and installs the next view after a membership change, then returns the
 * serialized {@link ReconfigureReply} for the requester.
 *
 * <p>The next view's members are {@code jSet} (joiners) plus the current members not
 * in {@code rSet} (leavers). If the current leader is being removed, a leader change
 * is forced. The reply view's addresses are rewritten so that replicas bound to
 * {@code 0.0.0.0} (docker) are advertised with their externally reachable host.
 *
 * @param jSetInfo extra info (one entry per joiner) echoed back in the reply
 * @param jSet     process ids joining the view
 * @param rSet     process ids leaving the view
 * @param f        requested fault threshold; negative means keep the current f
 *                 (NOTE(review): the installed view recomputes f as (n-1)/3 anyway)
 * @param cid      the consensus id at which the new view is installed
 * @return serialized {@link ReconfigureReply} bytes
 */
private byte[] reconfigure(List<String> jSetInfo, List<Integer> jSet, List<Integer> rSet, int f, int cid) {
    // ReconfigureRequest request = (ReconfigureRequest) TOMUtil.getObject(req);
    // Hashtable<Integer, String> props = request.getProperties();
    // int f = Integer.valueOf(props.get(CHANGE_F));
    lastJoinStet = new int[jSet.size()];
    int[] nextV = new int[currentView.getN() + jSet.size() - rSet.size()];
    int p = 0;
    boolean forceLC = false;
    // Joiners come first in the next view's process list.
    for (int i = 0; i < jSet.size(); i++) {
        lastJoinStet[i] = jSet.get(i);
        nextV[p++] = jSet.get(i);
    }
    // Keep current members that are not leaving.
    for (int i = 0; i < currentView.getProcesses().length; i++) {
        if (!contains(currentView.getProcesses()[i], rSet)) {
            nextV[p++] = currentView.getProcesses()[i];
        } else if (tomLayer.execManager.getCurrentLeader() == currentView.getProcesses()[i]) {
            // If the set of parties being removed contains the current leader,
            // a leader-change procedure must be forced.
            forceLC = true;
        }
    }
    if (f < 0) {
        f = currentView.getF();
    }
    // Resolve each member's network address, preserving any monitor port already
    // known from the current view.
    NodeNetwork[] addresses = new NodeNetwork[nextV.length];
    for (int i = 0; i < nextV.length; i++) {
        int processId = nextV[i];
        addresses[i] = getStaticConf().getRemoteAddress(processId);
        NodeNetwork nodeNetwork = currentView.getAddress(processId);
        if (nodeNetwork != null) {
            addresses[i].setMonitorPort(nodeNetwork.getMonitorPort());
        }
        LOGGER.info("I am {}, network[{}] -> {} !", this.getStaticConf().getProcessId(), processId, addresses[i]);
    }
    // View newV = new View(currentView.getId() + 1, nextV, f, addresses);
    // The value of f must be computed dynamically from the new membership size.
    View newV = new View(currentView.getId() + 1, nextV, (nextV.length - 1) / 3, addresses);
    LOGGER.info("I am proc {}, new view: {}", this.getStaticConf().getProcessId(), newV);
    LOGGER.info("I am proc {}, installed on CID: {}", this.getStaticConf().getProcessId(), cid);
    LOGGER.info("I am proc {}, lastJoinSet: {}", this.getStaticConf().getProcessId(), jSet);
    // TODO: Remove all information stored about each process in rSet
    // processes execute the leave!!!
    reconfigureTo(newV);
    LOGGER.info("I am proc {}, after reconfigure ,currview = {}", this.getStaticConf().getProcessId(), this.currentView);
    if (forceLC) {
        // TODO: Reactive it and make it work
        LOGGER.info("Shortening LC timeout");
        // tomLayer.requestsTimer.stopTimer();
        tomLayer.requestsTimer.setShortTimeout(3000);
        // tomLayer.requestsTimer.startTimer();
        // tomLayer.triggerTimeout(new LinkedList<TOMMessage>());
    }
    LOGGER.info("I am proc {}, I will send ReconfigureReply!", this.getStaticConf().getProcessId());
    // Build the reply view with externally reachable addresses (docker: 0.0.0.0
    // is replaced by the configured outer host).
    List<NodeNetwork> addressesTemp = new ArrayList<>();
    for (int i = 0; i < newV.getProcesses().length; i++) {
        int cpuId = newV.getProcesses()[i];
        NodeNetwork nodeNetwork = newV.getAddress(cpuId);
        if (nodeNetwork.getHost().equals("0.0.0.0")) {
            // proc docker env
            String host = this.getStaticConf().getOuterHostConfig().getHost(cpuId);
            NodeNetwork nodeNetworkNew = new NodeNetwork(host, nodeNetwork.getConsensusPort(), -1, nodeNetwork.isConsensusSecure(), false);
            LOGGER.info("I am proc {}, tempSocketAddress.getAddress().getHostAddress() = {}", this.getStaticConf().getProcessId(), host);
            addressesTemp.add(nodeNetworkNew);
        } else {
            addressesTemp.add(new NodeNetwork(nodeNetwork.getHost(), nodeNetwork.getConsensusPort(), -1, nodeNetwork.isConsensusSecure(), false));
        }
    }
    View replyView = new View(newV.getId(), newV.getProcesses(), newV.getF(), addressesTemp.toArray(new NodeNetwork[addressesTemp.size()]));
    LOGGER.info("I am proc {}, I adjust reply view, reply view = {}", this.getStaticConf().getProcessId(), replyView);
    // Update the TOMConfiguration with the new membership.
    staticConf.updateConfiguration(replyView.getProcesses());
    return TOMUtil.getBytes(new ReconfigureReply(replyView, jSetInfo.toArray(new String[0]), cid, tomLayer.execManager.getCurrentLeader()));
}
Usage of bftsmart.reconfiguration.views.NodeNetwork in the bftsmart project (blockchain-jd-com):
class ServerViewController, method reconfigureTo.
/**
 * Installs {@code newView} as the current view, preserving this replica's own
 * locally-configured network address, and recomputes membership-derived state
 * (other processes, BFT/CFT quorums).
 *
 * <p>Views with an id lower than the current one are rejected, which also guards
 * against concurrent threads (e.g. view sync) regressing the view during a
 * reconfiguration.
 *
 * @param newView the view to install; ignored if older than the current view
 */
@Override
public final void reconfigureTo(View newView) {
    // Prevent other threads (e.g. view sync) from regressing the view during reconfig.
    if (newView.getId() < this.getCurrentView().getId()) {
        LOGGER.info("reconfigureTo error, new view id less than local view id!");
        return;
    }
    int processId = this.getStaticConf().getProcessId();
    // Capture our own address from the OLD view before replacing it, so the local
    // binding survives the view switch.
    NodeNetwork localNodeNetwork = this.getCurrentView().getAddress(processId);
    this.currentView = newView;
    LOGGER.info("I am {}, my new current view = {} !!!", processId, this.currentView);
    getViewStore().storeView(this.currentView);
    if (newView.isMember(getStaticConf().getProcessId())) {
        // Restore this replica's locally-known address inside the new view.
        this.currentView.setAddresses(processId, localNodeNetwork);
        // This replica is a member of the new view: rebuild the peer list.
        otherProcesses = new int[currentView.getProcesses().length - 1];
        int c = 0;
        for (int i = 0; i < currentView.getProcesses().length; i++) {
            if (currentView.getProcesses()[i] != getStaticConf().getProcessId()) {
                otherProcesses[c++] = currentView.getProcesses()[i];
            }
        }
        // NOTE(review): acknowledged-incorrect quorum computation kept as-is for
        // compatibility ("error use of quorum, refactor later" in the original):
        // (n + f) / 2 is integer division, so Math.ceil is a no-op here — the usual
        // BFT quorum would be floor((n + f) / 2) + 1. Refactor deliberately deferred.
        this.quorumBFT = (int) Math.ceil((this.currentView.getN() + this.currentView.getF()) / 2);
        this.quorumCFT = (int) Math.ceil(this.currentView.getN() / 2);
    } else if (this.currentView != null && this.currentView.isMember(getStaticConf().getProcessId())) {
        // TODO: Left the system in newView -> LEAVE
        // CODE for LEAVE
        // NOTE(review): unreachable as written — currentView was just assigned newView,
        // so this condition mirrors the branch above; confirm intended semantics.
    } else {
        // TODO: Didn't enter the system yet
    }
}
End of aggregated NodeNetwork usage examples.