Use of net.dempsy.lifecycle.annotation.Mp in the Dempsy project (by Dempsy).
From the class TestContainer, method testEvictable.
@Test
public void testEvictable() throws Exception {
    // Instantiate an Mp for key "foo" and grab the prototype so we can
    // watch how many clones the container creates.
    final TestProcessor mp = createAndGet("foo");
    final TestProcessor prototype = context.getBean(TestProcessor.class);
    final int baselineCloneCount = prototype.cloneCount.intValue();

    // Mark the Mp as willing to be evicted, then run an eviction pass.
    mp.evict.set(true);
    container.evict();

    // Dispatching to the same key again should force a brand-new clone,
    // since the previous instance was just evicted.
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(o -> prototype.cloneCount.intValue() > baselineCloneCount));
    Thread.sleep(1000);
    // Exactly one additional clone should exist — no more, no fewer.
    assertEquals("Clone count, 2nd message", baselineCloneCount + 1, prototype.cloneCount.intValue());
}
Use of net.dempsy.lifecycle.annotation.Mp in the Dempsy project (by Dempsy).
From the class TestContainer, method testEvictableWithBusyMp.
@Test
public void testEvictableWithBusyMp() throws Throwable {
    final TestProcessor mp = createAndGet("foo");

    // Hold up processing: the Mp will block on this latch while handling the
    // next message, keeping it "busy" during the eviction pass.
    mp.latch = new CountDownLatch(1);
    // allow eviction
    mp.evict.set(true);

    // sending it a message will now cause it to hang up while processing
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));

    // keep track of the cloneCount for later checking
    final TestProcessor prototype = context.getBean(TestProcessor.class);
    final int tmpCloneCount = prototype.cloneCount.intValue();

    // invocation count should go to 2
    assertTrue(poll(mp, o -> o.invocationCount == 2));

    // now kick off the evict in a separate thread since we expect it to hang
    // until the mp becomes unstuck.
    // this will allow us to see the evict pass complete
    final AtomicBoolean evictIsComplete = new AtomicBoolean(false);
    // NOTE: a lambda replaces the original anonymous Runnable — the file
    // already uses Java 8 lambdas throughout.
    final Thread thread = new Thread(() -> {
        container.evict();
        evictIsComplete.set(true);
    });
    thread.start();

    // now check to make sure eviction doesn't complete.
    // just a little to give any mistakes a chance to work themselves through
    Thread.sleep(100);
    // make sure eviction didn't finish while the Mp is still stuck
    assertFalse(evictIsComplete.get());

    // this lets it go
    mp.latch.countDown();
    // wait until the eviction completes
    assertTrue(poll(evictIsComplete, AtomicBoolean::get));
    // the evict thread has nothing left to do once the flag is set; reap it
    // so the test doesn't leave a stray thread behind.
    thread.join();

    Thread.sleep(100);
    assertEquals("activation count, 2nd message", 1, mp.activationCount);
    assertEquals("invocation count, 2nd message", 2, mp.invocationCount);

    // dispatching again should produce a fresh clone since the old Mp was evicted
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(o -> prototype.cloneCount.intValue() > tmpCloneCount));
    Thread.sleep(1000);
    assertEquals("Clone count, 2nd message", tmpCloneCount + 1, prototype.cloneCount.intValue());
}
Use of net.dempsy.lifecycle.annotation.Mp in the Dempsy project (by Dempsy).
From the class TestContainer, method testFeedbackLoop.
@Test
public void testFeedbackLoop() throws Exception {
    cache = new ConcurrentHashMap<>();

    // The adaptor must already be wired with a dispatcher before we send anything.
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    assertNotNull(adaptor.dispatcher);

    // A MyMessage causes the Mp to feed a message back into the container.
    adaptor.dispatcher.dispatchAnnotated(new MyMessage("foo"));

    assertTrue(poll(o -> container.getProcessorCount() > 0));
    Thread.sleep(100);
    assertEquals("did not create MP", 1, container.getProcessorCount());

    // The Mp registers itself in the cache under its key once created.
    assertTrue(poll(cache, c -> c.containsKey("foo")));
    final TestProcessor processor = cache.get("foo");
    assertNotNull("MP not associated with expected key", processor);

    // The feedback message should drive a second invocation on the SAME
    // instance (one activation, two invocations).
    assertTrue(poll(processor, p -> p.invocationCount > 1));
    assertEquals("activation count, 1st message", 1, processor.activationCount);
    assertEquals("invocation count, 1st message", 2, processor.invocationCount);

    // A direct ContainerTestMessage adds one more invocation, still no re-activation.
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(processor, p -> p.invocationCount > 2));
    Thread.sleep(100);
    assertEquals("activation count, 2nd message", 1, processor.activationCount);
    assertEquals("invocation count, 2nd message", 3, processor.invocationCount);
}
Use of net.dempsy.lifecycle.annotation.Mp in the Dempsy project (by Dempsy).
From the class TestAlmostSimple, method testSimple.
// End-to-end test: two clusters ("mpEven" and "mpOdd") in one node, fed by an
// inline adaptor that dispatches an increasing integer sequence as fast as it
// can, then verifies eviction and key-based message distribution.
@Test
public void testSimple() throws Exception {
// One prototype per cluster, plus a counter of messages dispatched so far.
final MpEven mpEven = new MpEven();
final MpOdd mpOdd = new MpOdd();
final AtomicLong count = new AtomicLong(0);
Adaptor adaptor = null;
try (final DefaultThreadingModel tm = new DefaultThreadingModel("TB", -1, 1);
final NodeManager nm = new NodeManager()) {
// Build the node: "mpEven" gets a 1-second eviction frequency; the adaptor's
// start() loop runs until stop() flips 'done'.
final Node n = new Node.Builder("test-app").defaultRoutingStrategyId("net.dempsy.router.simple").containerTypeId(containerTypeId).receiver(new Dummy()).cluster("start").adaptor(adaptor = new Adaptor() {
private Dispatcher disp;
boolean done = false;
int cur = 0;
// Set by start() once its dispatch loop has fully exited; stop() spins on it
// so that stop() does not return while a dispatch may still be in flight.
AtomicBoolean exitedLoop = new AtomicBoolean(false);
@Override
public void stop() {
done = true;
// Busy-wait until the start() loop observes 'done' and winds down.
while (!exitedLoop.get()) Thread.yield();
}
@Override
public void start() {
try {
// Dispatch consecutive integers until stopped, counting each one.
while (!done) {
uncheck(() -> disp.dispatchAnnotated(new Message(cur++)));
count.incrementAndGet();
}
} finally {
// Signal loop exit even if a dispatch threw.
exitedLoop.set(true);
}
}
@Override
public void setDispatcher(final Dispatcher dispatcher) {
this.disp = dispatcher;
}
}).cluster("mpEven").mp(new MessageProcessor<>(mpEven)).evictionFrequency(1, TimeUnit.SECONDS).cluster("mpOdd").mp(new MessageProcessor<>(mpOdd)).build();
nm.node(n).collaborator(new LocalClusterSessionFactory().createSession()).threadingModel(tm.start(n.application));
nm.start();
// Wait for a substantial volume of traffic to flow through the node.
assertTrue(ConditionPoll.poll(c -> count.get() > 100000));
// make sure an eviction happened before closing
// at this point all of the mps should have been checked for eviction
assertTrue(ConditionPoll.poll(o -> 2 == mpEven.allMps.size()));
mpEven.allMps.forEach(m -> uncheck(() -> assertTrue(ConditionPoll.poll(o -> m.evictCalled))));
// now enable them to actually be removed from the container.
// we need to stop the adaptor or we'll keep recreating them
adaptor.stop();
// need to wait for all of the adaptor messages to play through before evicting
Thread.sleep(2000);
MpEven.allowEviction.set(true);
// Find the single container backing the "mpEven" cluster and wait for its
// processor count to drain to zero once eviction is allowed.
final List<Container> containers = nm.getContainers().stream().filter(c -> "mpEven".equals(c.getClusterId().clusterName)).collect(Collectors.toList());
assertEquals(1, containers.size());
final Container container = containers.get(0);
assertTrue(ConditionPoll.poll(o -> 0 == container.getProcessorCount()));
}
Thread.sleep(2000);
// make sure the messages were distributed correctly.
// Each cluster should have seen exactly two Mp instances, keyed "yes"/"no".
assertEquals(2, mpEven.allMps.size());
assertEquals(2, mpOdd.allMps.size());
final MpEven mpEvenYes = mpEven.allMps.stream().filter(m -> "yes".equals(m.key)).findAny().get();
// all messages here should be even numbers
assertTrue(mpEvenYes.received.stream().allMatch(m -> (m.message & 0x01) == 0));
final MpEven mpEvenNo = mpEven.allMps.stream().filter(m -> "no".equals(m.key)).findAny().get();
// all messages here should be odd numbers
assertTrue(mpEvenNo.received.stream().allMatch(m -> (m.message & 0x01) == 1));
final MpOdd mpOddYes = mpOdd.allMps.stream().filter(m -> "yes".equals(m.key)).findAny().get();
// all messages here should be odd numbers
assertTrue(mpOddYes.received.stream().allMatch(m -> (m.message & 0x01) == 1));
final MpOdd mpOddNo = mpOdd.allMps.stream().filter(m -> "no".equals(m.key)).findAny().get();
// all messages here should be even numbers
assertTrue(mpOddNo.received.stream().allMatch(m -> (m.message & 0x01) == 0));
}
Aggregations