Use of net.dempsy.messages.KeyedMessage in project Dempsy by Dempsy.
The class TestContainer, method testWrongTypeMessage.
@Test
public void testWrongTypeMessage() throws Exception {
    assertEquals(0, ((ClusterMetricGetters) container.statCollector).getMessageFailedCount());
    final KeyedMessageWithType kmwt = ke.extract(new MyMessage("YO")).get(0);
    container.dispatch(new KeyedMessage(kmwt.key, new Object()), Operation.handle, true);
    assertEquals(1, ((ClusterMetricGetters) container.statCollector).getMessageFailedCount());
}
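The test extracts a valid key from a properly annotated message, then pairs that key with a payload of the wrong type; the container records a failed message rather than throwing. A sketch of what a message class like MyMessage presumably looks like, assuming Dempsy's annotation-based lifecycle (the annotation package name and the actual test fixture are assumptions and may differ by version):

    // package name may differ by Dempsy version
    import net.dempsy.lifecycle.annotation.MessageKey;
    import net.dempsy.lifecycle.annotation.MessageType;

    @MessageType
    public class MyMessage {
        private final String key;

        public MyMessage(final String key) {
            this.key = key;
        }

        @MessageKey
        public String getKey() {
            return key;
        }
    }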
Use of net.dempsy.messages.KeyedMessage in project Dempsy by Dempsy.
The class TestSimple, method testSimple.
@Test
public void testSimple() throws Exception {
    final AtomicLong count = new AtomicLong(0L);
    try (final NodeManager nm = new NodeManager();
        final DefaultThreadingModel tm = new DefaultThreadingModel("TB", -1, 1)) {
        final Node n = new Node.Builder("test-app")
            .defaultRoutingStrategyId("net.dempsy.router.simple")
            .receiver(new Dummy())
            .cluster("start")
            .adaptor(new Adaptor() {
                private Dispatcher disp;
                boolean done = false;

                @Override
                public void stop() {
                    done = true;
                }

                @Override
                public void start() {
                    try {
                        while (!done) {
                            disp.dispatch(new KeyedMessageWithType(Integer.valueOf(1), "Hello", "string"));
                            // This is here for when the Container has a max pending and it gets
                            // starved for CPU cycles in this particular test.
                            Thread.yield();
                        }
                    } catch (final InterruptedException ie) {
                        if (!done)
                            LOGGER.error("Interrupted but not stopping.");
                    }
                }

                @Override
                public void setDispatcher(final Dispatcher dispatcher) {
                    this.disp = dispatcher;
                }
            })
            .cluster("mp")
            .mp(new MessageProcessor(MpFactory.make(() -> new Mp() {
                @Override
                public KeyedMessageWithType[] handle(final KeyedMessage message) {
                    count.incrementAndGet();
                    return null;
                }
            }, "string")))
            .build();
        nm.node(n).collaborator(new LocalClusterSessionFactory().createSession()).threadingModel(tm.start("nodeid"));
        nm.start();
        assertTrue(ConditionPoll.poll(o -> count.get() > 100000));
    }
}
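The adaptor floods the "mp" cluster while ConditionPoll.poll retries the supplied check until it returns true or times out. Returning null from handle terminates the message chain; a handler that instead returns messages causes the container to dispatch them onward. A minimal sketch of that alternative, assuming the same Mp shape as in the test (the derived payload and the "string" type tag are illustrative):

    final Mp forwarder = new Mp() {
        @Override
        public KeyedMessageWithType[] handle(final KeyedMessage message) {
            // forward a derived message instead of terminating the chain with null
            return new KeyedMessageWithType[] {
                new KeyedMessageWithType(message.key, "forwarded:" + message.message, "string")
            };
        }
    };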
Use of net.dempsy.messages.KeyedMessage in project Dempsy by Dempsy.
The class TestContainerLoadHandling, method testMessagesCanQueueWithinLimitsBlocking.
/**
 * Test the case where messages arrive faster than they are processed but the queue never
 * exceeds the Thread Pool Size * 2, so no messages are discarded.
 */
@Test
public void testMessagesCanQueueWithinLimitsBlocking() throws Exception {
    // produce the initial MPs
    final ArrayList<Thread> in = new ArrayList<Thread>();
    final KeyedMessage[] messages = new KeyedMessage[NUMMPS];
    for (int i = 0; i < NUMMPS; i++)
        messages[i] = km(new MockInputMessage("key" + i));
    for (int j = 0; j < NUMMPS; j++)
        in.add(sendMessages(container, new KeyedMessage[] { messages[j] }, 1));
    // let the messages go.
    SendMessageThread.startLatch.countDown();
    assertTrue(poll(o -> in.stream().filter(t -> t.isAlive()).count() == 0));
    assertEquals(NUMMPS, container.getProcessorCount());
    final int numMessagePumped = dispatcher.messages.size();
    assertEquals(numMessagePumped, clusterStats.getProcessedMessageCount());
    dispatcher.messages.clear();
    for (int i = 0; i < NUMRUNS; i++) {
        final int iter = i;
        final long initialMessagesDispatched = clusterStats.getDispatchedMessageCount();
        // set up a gate for starting
        SendMessageThread.startLatch = new CountDownLatch(1);
        SendMessageThread.finishedCount.set(0);
        SendMessageThread.processingCount.set(0);
        commonLongRunningHandler = new CountDownLatch(1);
        in.clear();
        for (int j = 0; j < NUMMPS; j++)
            in.add(sendMessages(container, new KeyedMessage[] { km(new MockInputMessage("key" + (j % NUMMPS))) }, DUPFACTOR));
        assertTrue(poll(o -> SendMessageThread.processingCount.get() == NUMMPS));
        // let the messages go.
        SendMessageThread.startLatch.countDown();
        // NUMMPS messages should be "dispatched"
        assertTrue(poll(o -> (clusterStats.getDispatchedMessageCount() - initialMessagesDispatched) == NUMMPS));
        // 1 message from each thread should be "in flight"
        assertTrue(poll(o -> container.getMessageWorkingCount() == NUMMPS));
        Thread.sleep(50);
        // NUMMPS messages should STILL be "dispatched"
        assertTrue(poll(o -> (clusterStats.getDispatchedMessageCount() - initialMessagesDispatched) == NUMMPS));
        // let the rest of them go
        commonLongRunningHandler.countDown();
        assertTrue("Timeout waiting on message to be sent", poll(o -> in.stream().filter(t -> t.isAlive()).count() == 0));
        // after sends are allowed to proceed
        assertEquals(NUMMPS, SendMessageThread.finishedCount.get());
        assertEquals(0, clusterStats.getInFlightMessageCount());
        assertEquals((DUPFACTOR * NUMMPS) * (iter + 1), dispatcher.messages.size());
        assertEquals((DUPFACTOR * NUMMPS) * (iter + 1) + numMessagePumped, clusterStats.getProcessedMessageCount());
        assertEquals(0L, clusterStats.getMessageCollisionCount());
        checkStat(clusterStats);
    }
}
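The gate mechanics this test leans on are plain java.util.concurrent: every sender thread parks on a shared start latch so the test can release them all at once and then make deterministic assertions about counts. The same idiom in isolation (no Dempsy types; the thread count here is arbitrary):

    final CountDownLatch startGate = new CountDownLatch(1);
    final AtomicInteger finished = new AtomicInteger();
    for (int i = 0; i < 4; i++) {
        new Thread(() -> {
            try {
                startGate.await(); // park until the test opens the gate
                finished.incrementAndGet();
            } catch (final InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        }).start();
    }
    startGate.countDown(); // release every sender at the same instant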
Use of net.dempsy.messages.KeyedMessage in project Dempsy by Dempsy.
The class NonLockingAltContainer, method dispatch.
// this is called directly from tests but shouldn't be accessed otherwise.
@Override
public void dispatch(final KeyedMessage message, final boolean block) throws IllegalArgumentException, ContainerException {
    if (!isRunningLazy) {
        LOGGER.debug("Dispatch called on stopped container");
        statCollector.messageFailed(false);
    }
    if (message == null)
        // No. We didn't process the null message.
        return;
    if (message.message == null)
        throw new IllegalArgumentException("the container for " + clusterId + " attempted to dispatch null message.");
    if (message.key == null)
        throw new ContainerException("Message " + objectDescription(message.message) + " contains no key.");
    if (!inbound.doesMessageKeyBelongToNode(message.key)) {
        if (LOGGER.isDebugEnabled())
            LOGGER.debug("Message with key " + SafeString.objectDescription(message.key) + " sent to wrong container.");
        statCollector.messageFailed(false);
        return;
    }
    numBeingWorked.incrementAndGet();
    boolean instanceDone = false;
    while (!instanceDone) {
        instanceDone = true;
        final InstanceWrapper wrapper = getInstanceForKey(message.key);
        // wrapper will be null if the activate returns 'false'.
        if (wrapper != null) {
            final MutRef<WorkingQueueHolder> mref = new MutRef<>();
            boolean messageDone = false;
            while (!messageDone) {
                messageDone = true;
                final WorkingQueueHolder mailbox = setIfAbsent(wrapper.mailbox, () -> mref.set(new WorkingQueueHolder(false)));
                // if mailbox is null then I got it.
                if (mailbox == null) {
                    // mref.ref can't be null if I got the mailbox.
                    final WorkingQueueHolder box = mref.ref;
                    // spin until I get the queue.
                    final LinkedList<KeyedMessage> q = getQueue(box);
                    KeyedMessage toProcess = pushPop(q, message);
                    // put the queue back.
                    box.queue.lazySet(q);
                    while (toProcess != null) {
                        invokeOperation(wrapper.instance, Operation.handle, toProcess);
                        numBeingWorked.getAndDecrement();
                        // get the next message.
                        final LinkedList<KeyedMessage> queue = getQueue(box);
                        if (queue.size() == 0)
                            // we need to leave the queue out.
                            break;
                        toProcess = queue.removeFirst();
                        box.queue.lazySet(queue);
                    }
                    // release the mailbox.
                    wrapper.mailbox.set(null);
                } else {
                    // we didn't get exclusive access, so make one try at putting
                    // the message in the mailbox.
                    final LinkedList<KeyedMessage> q = mailbox.queue.getAndSet(null);
                    if (q != null) {
                        // I got it!
                        q.add(message);
                        mailbox.queue.lazySet(q);
                    } else {
                        // see if we're evicted.
                        if (wrapper.evicted) {
                            instanceDone = false;
                            // start back at getting the instance.
                            break;
                        }
                        // start over from the top.
                        messageDone = false;
                    }
                }
            }
        } else {
            // if we got here then the activate on the Mp explicitly returned 'false'.
            if (LOGGER.isDebugEnabled())
                LOGGER.debug("the container for " + clusterId + " failed to activate the Mp for " + SafeString.valueOf(prototype));
            // leave the loop.
            break;
        }
    }
}
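The loop coordinates entirely through the wrapper's mailbox, an AtomicReference that is null when unowned. Hedged sketches of the two helpers it leans on, inferred only from how they are used above (the real Dempsy implementations may differ in detail):

    // Install a value only if the reference is empty. Returns null when our value won
    // (we now own the mailbox); otherwise returns the holder that was already there.
    static <T> T setIfAbsent(final AtomicReference<T> ref, final Supplier<T> newValue) {
        while (true) {
            final T cur = ref.get();
            if (cur != null)
                return cur;
            if (ref.compareAndSet(null, newValue.get()))
                return null;
        }
    }

    // Append the new message, then take the head: FIFO order is preserved and the
    // caller always leaves with exactly one message to process.
    static KeyedMessage pushPop(final LinkedList<KeyedMessage> q, final KeyedMessage toPush) {
        if (q.size() == 0)
            return toPush;
        q.add(toPush);
        return q.removeFirst();
    }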
Use of net.dempsy.messages.KeyedMessage in project Dempsy by Dempsy.
The class NonLockingContainer, method dispatch.
@Override
public void dispatch(final KeyedMessage message, final boolean block) throws IllegalArgumentException, ContainerException {
    if (!isRunningLazy) {
        LOGGER.debug("Dispatch called on stopped container");
        statCollector.messageFailed(false);
    }
    if (message == null)
        // No. We didn't process the null message.
        return;
    if (message.message == null)
        throw new IllegalArgumentException("the container for " + clusterId + " attempted to dispatch null message.");
    if (message.key == null)
        throw new ContainerException("Message " + objectDescription(message.message) + " contains no key.");
    if (!inbound.doesMessageKeyBelongToNode(message.key)) {
        if (LOGGER.isDebugEnabled())
            LOGGER.debug("Message with key " + SafeString.objectDescription(message.key) + " sent to wrong container.");
        statCollector.messageFailed(false);
        return;
    }
    final Object key = message.key;
    boolean keepTrying = true;
    while (keepTrying) {
        final MutRef<WorkingPlaceholder> wph = new MutRef<>();
        final WorkingPlaceholder alreadyThere = putIfAbsent(working, key, () -> wph.set(new WorkingPlaceholder()));
        if (alreadyThere == null) {
            // we're it!
            final WorkingPlaceholder wp = wph.ref;
            // we're not going to keep trying.
            keepTrying = false;
            // these will be dispatched while NOT holding the lock.
            List<KeyedMessageWithType> response = null;
            try {
                // if we don't get the WorkingPlaceholder out of the working map
                // then that Mp will forever be lost.
                // we're working one.
                numBeingWorked.incrementAndGet();
                Object instance = instances.get(key);
                if (instance == null) {
                    try {
                        // this can throw
                        instance = createAndActivate(key);
                    } catch (final RuntimeException e) {
                        // container or runtime exception. This will drain the swamp.
                        LOGGER.debug("Failed to process message with key " + SafeString.objectDescription(message.key), e);
                        instance = null;
                    }
                }
                if (instance == null) {
                    // activation or creation failed. decrement for this one.
                    numBeingWorked.decrementAndGet();
                    LOGGER.debug("Can't handle message {} because the creation of the Mp seems to have failed.", SafeString.objectDescription(key));
                    final WorkingQueueHolder mailbox = getQueue(wp);
                    if (mailbox.queue != null) {
                        mailbox.queue.forEach(m -> {
                            LOGGER.debug("Failed to process message with key " + SafeString.objectDescription(m.key));
                            statCollector.messageFailed(true);
                            // decrement for each message in the queue.
                            numBeingWorked.decrementAndGet();
                        });
                    }
                } else {
                    KeyedMessage curMessage = message;
                    while (curMessage != null) {
                        // curMessage can't be null the first time through.
                        final List<KeyedMessageWithType> resp = invokeOperation(instance, Operation.handle, curMessage);
                        if (resp != null) {
                            // these responses will be dispatched after we release the lock.
                            if (response == null)
                                response = new ArrayList<>();
                            response.addAll(resp);
                        }
                        // decrement the initial increment.
                        numBeingWorked.decrementAndGet();
                        // work off the queue. spin until I have it.
                        final WorkingQueueHolder mailbox = getQueue(wp);
                        if (mailbox.queue != null && mailbox.queue.size() > 0) {
                            // there are messages in the queue, so take one off.
                            curMessage = mailbox.queue.removeFirst();
                            // curMessage CAN'T be null here. Release the lock on the mailbox ...
                            // we're ready to process 'curMessage' on the next loop.
                            wp.mailbox.set(mailbox);
                        } else {
                            curMessage = null;
                            // (1) NOTE: DON'T put the queue back. This will prevent ALL other threads from
                            // dropping a message in this box. When an alternate thread tries to open the
                            // mailbox to put a message in, and can't because THIS thread left it locked,
                            // that thread starts the process from the beginning, re-attempting to get
                            // exclusive control over the Mp. In other words, the other thread only makes
                            // a single attempt, and if it fails it goes back to attempting to get the Mp
                            // from the beginning.
                            //
                            // This thread cannot give up the current Mp if there's any potential for data
                            // to end up in the queue. Since we're about to give up the Mp we cannot allow
                            // the mailbox to become available, therefore we cannot allow any other threads
                            // to spin on it.
                        }
                    }
                }
            } finally {
                if (working.remove(key) == null)
                    LOGGER.error("IMPOSSIBLE! Null key removed from working set.", new RuntimeException());
            }
            if (response != null) {
                try {
                    dispatcher.dispatch(response);
                } catch (final Exception de) {
                    LOGGER.warn("Failed on subsequent dispatch of " + response + ": " + de.getLocalizedMessage());
                }
            }
        } else {
            // ... we didn't get the lock.
            if (!block) {
                // we're non-blocking, so record the collision and give up
                // (blocking mode never records collisions).
                if (LOGGER.isTraceEnabled())
                    LOGGER.trace("the container for " + clusterId + " failed to obtain lock on " + SafeString.valueOf(prototype));
                statCollector.messageCollision(message);
                keepTrying = false;
            } else {
                // try to get the queue.
                final WorkingQueueHolder mailbox = alreadyThere.mailbox.getAndSet(null);
                if (mailbox != null) {
                    // we got the queue!
                    try {
                        keepTrying = false;
                        // drop a message in the mailbox queue and mark it as being worked.
                        numBeingWorked.incrementAndGet();
                        if (mailbox.queue == null)
                            mailbox.queue = new LinkedList<>();
                        mailbox.queue.add(message);
                    } finally {
                        // put it back - releasing the lock.
                        alreadyThere.mailbox.set(mailbox);
                    }
                } else {
                    // we failed to get the queue, so we need to start completely over;
                    // otherwise there's a potential race condition - see the note at (1).
                    // Maybe we'll have better luck next time.
                }
            }
            // we didn't get the lock, we're blocking, and we're now done handling the mailbox.
        }
        // we didn't get the lock so we tried the mailbox (or ended because we're non-blocking).
    } // keep working
}
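Here the unit of exclusion is the 'working' map rather than a per-wrapper mailbox: whichever thread installs the WorkingPlaceholder for a key owns that Mp until the finally block removes the entry. A plausible shape for the putIfAbsent helper used above, assuming 'working' is a ConcurrentHashMap keyed by message key (hedged; the real helper may differ):

    // Returns null when our placeholder won the race (this thread now owns the key);
    // otherwise returns the placeholder of the thread already working that key.
    static <K, V> V putIfAbsent(final ConcurrentHashMap<K, V> map, final K key, final Supplier<V> value) {
        return map.putIfAbsent(key, value.get());
    }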