Use of org.jgroups.protocols.pbcast.NakAckHeader2 in project JGroups by belaban.
Class MessageTest, method addHeaders.
protected static void addHeaders(Message msg) {
    TpHeader tp_hdr = new TpHeader("DemoChannel2");
    msg.putHeader(UDP_ID, tp_hdr);
    PingHeader ping_hdr = new PingHeader(PingHeader.GET_MBRS_REQ).clusterName("demo-cluster");
    msg.putHeader(PING_ID, ping_hdr);
    NakAckHeader2 nak_hdr = NakAckHeader2.createXmitRequestHeader(Util.createRandomAddress("S"));
    msg.putHeader(NAKACK_ID, nak_hdr);
}
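A minimal usage sketch for the helper above; the message and the final assertion are illustrative, and UDP_ID, PING_ID and NAKACK_ID are assumed to be the protocol IDs defined elsewhere in the test class:

Message msg = new BytesMessage(null, "payload");
addHeaders(msg);
// each header can be read back by its protocol ID
NakAckHeader2 nak_hdr = msg.getHeader(NAKACK_ID);
assert nak_hdr != null;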
Use of org.jgroups.protocols.pbcast.NakAckHeader2 in project JGroups by belaban.
Class NAKACK2_RetransmissionTest, method injectMessage.
/**
 * Makes NAKACK2 receive a message with the given seqno.
 */
protected void injectMessage(long seqno) {
    Message msg = new EmptyMessage(null).setSrc(B);
    NakAckHeader2 hdr = NakAckHeader2.createMessageHeader(seqno);
    msg.putHeader(ID, hdr);
    nak.up(msg);
}
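A minimal sketch of how this helper is typically driven in the retransmission test; the seqnos are illustrative. Delivering 1, 2 and then 4 leaves a gap at 3, which NAKACK2 should eventually try to fill by requesting a retransmission from B:

injectMessage(1);
injectMessage(2);
injectMessage(4); // seqno 3 is missing, triggering NAKACK2's retransmission logic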
Use of org.jgroups.protocols.pbcast.NakAckHeader2 in project geode by apache.
Class StatRecorder, method filter.
private void filter(Message msg, int direction) {
    if (direction == INCOMING) {
        Header h = msg.getHeader(frag2HeaderId);
        boolean copyBuffer = false;
        if (h instanceof FragHeader) {
            copyBuffer = true;
        } else {
            h = msg.getHeader(unicastHeaderId);
            if (h instanceof UNICAST3.Header) {
                copyBuffer = true;
            } else {
                h = msg.getHeader(nakackHeaderId);
                if (h instanceof NakAckHeader2) {
                    copyBuffer = true;
                }
            }
        }
        if (copyBuffer) {
            // JGroups doesn't copy its message buffer when thread pools are
            // disabled; this causes Frag2 fragments to become corrupted
            msg.setBuffer(msg.getBuffer(), 0, msg.getLength());
        }
    }
}
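A minimal sketch of how the header IDs used above are typically resolved; ClassConfigurator is the JGroups registry that maps protocol classes to their short IDs, and the variable names are assumptions:

short frag2HeaderId = ClassConfigurator.getProtocolId(FRAG2.class);
short unicastHeaderId = ClassConfigurator.getProtocolId(UNICAST3.class);
short nakackHeaderId = ClassConfigurator.getProtocolId(NAKACK2.class);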
Use of org.jgroups.protocols.pbcast.NakAckHeader2 in project JGroups by belaban.
Class NAKACK_StressTest, method createMessage.
private static Message createMessage(Address dest, Address src, long seqno, boolean oob) {
    Message msg = new BytesMessage(dest, "hello world").setSrc(src);
    NakAckHeader2 hdr = NakAckHeader2.createMessageHeader(seqno);
    msg.putHeader(NAKACK_ID, hdr);
    if (oob)
        msg.setFlag(Message.Flag.OOB);
    return msg;
}
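A minimal usage sketch; addresses and seqnos are illustrative:

Address dest = Util.createRandomAddress("A"), src = Util.createRandomAddress("B");
Message regular = createMessage(dest, src, 1, false);
Message oob = createMessage(dest, src, 2, true);
assert oob.isFlagSet(Message.Flag.OOB);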
Use of org.jgroups.protocols.pbcast.NakAckHeader2 in project JGroups by belaban.
Class NAKACK_StressTest, method start.
private static void start(final int num_threads, final int num_msgs, boolean oob) {
    final NAKACK2 nak = new NAKACK2();
    final AtomicInteger counter = new AtomicInteger(num_msgs);
    final AtomicLong seqno = new AtomicLong(1);
    final AtomicInteger delivered_msgs = new AtomicInteger(0);
    final Lock lock = new ReentrantLock();
    final Condition all_msgs_delivered = lock.newCondition();
    final ConcurrentLinkedQueue<Long> delivered_msg_list = new ConcurrentLinkedQueue<>();
    final Address local_addr = Util.createRandomAddress("A");
    final Address sender = Util.createRandomAddress("B");
    nak.setDownProtocol(new Protocol() {
        public Object down(Event evt) {
            return null;
        }
    });
    nak.setUpProtocol(new Protocol() {
        public Object up(Message msg) {
            delivered_msgs.incrementAndGet();
            NakAckHeader2 hdr = msg.getHeader(NAKACK_ID);
            if (hdr != null)
                delivered_msg_list.add(hdr.getSeqno());
            if (delivered_msgs.get() >= num_msgs) {
                lock.lock();
                try {
                    all_msgs_delivered.signalAll();
                } finally {
                    lock.unlock();
                }
            }
            return null;
        }

        public void up(MessageBatch batch) {
            for (Message msg : batch) {
                delivered_msgs.incrementAndGet();
                NakAckHeader2 hdr = msg.getHeader(NAKACK_ID);
                if (hdr != null)
                    delivered_msg_list.add(hdr.getSeqno());
                if (delivered_msgs.get() >= num_msgs) {
                    lock.lock();
                    try {
                        all_msgs_delivered.signalAll();
                    } finally {
                        lock.unlock();
                    }
                }
            }
        }
    });
    nak.setDiscardDeliveredMsgs(true);
    for (Protocol p = nak; p != null; p = p.getDownProtocol())
        p.setAddress(local_addr);
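    // switch NAKACK2 from client to server mode, so incoming messages are delivered rather than queued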
    nak.down(new Event(Event.BECOME_SERVER));
    View view = View.create(local_addr, 1, local_addr, sender);
    nak.down(new Event(Event.VIEW_CHANGE, view));
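    // seed the digest with highest-delivered/received seqno 0 for both members, so delivery starts at seqno 1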
    MutableDigest digest = new MutableDigest(view.getMembersRaw());
    digest.set(local_addr, 0, 0);
    digest.set(sender, 0, 0);
    nak.down(new Event(Event.SET_DIGEST, digest));
    final CountDownLatch latch = new CountDownLatch(1);
    Sender[] adders = new Sender[num_threads];
    for (int i = 0; i < adders.length; i++) {
        adders[i] = new Sender(nak, latch, counter, seqno, oob, sender);
        adders[i].start();
    }
    long start = System.currentTimeMillis();
    latch.countDown(); // starts all adders
    int max_tries = 30;
    lock.lock();
    try {
        while (delivered_msgs.get() < num_msgs && max_tries-- > 0) {
            try {
                all_msgs_delivered.await(1000, TimeUnit.MILLISECONDS);
                System.out.println("received " + delivered_msgs.get() + " msgs");
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    } finally {
        lock.unlock();
    }
    long time = System.currentTimeMillis() - start;
    double requests_sec = num_msgs / (time / 1000.0);
    System.out.printf("\nTime: %d ms, %.2f requests / sec\n", time, requests_sec);
    System.out.println("Delivered messages: " + delivered_msg_list.size());
    if (delivered_msg_list.size() < 100)
        System.out.println("Elements: " + delivered_msg_list);
    nak.stop();
    List<Long> results = new ArrayList<>(delivered_msg_list);
    if (oob)
        Collections.sort(results);
    assert results.size() == num_msgs : "expected " + num_msgs + ", but got " + results.size();
    System.out.println("Checking results consistency");
    int i = 1;
    for (Long num : results) {
        if (num != i) {
            assert i == num : "expected " + i + " but got " + num;
            return;
        }
        i++;
    }
    System.out.println("OK");
}
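A minimal sketch of driving the stress test; the thread and message counts below are illustrative assumptions, not the values used by the original test:

public static void main(String[] args) {
    start(10, 1_000_000, false); // regular messages
    start(10, 1_000_000, true);  // same load, OOB messages
}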