Use of net.dempsy.messages.Adaptor in the Dempsy project — class TestContainer, method testMtInvokeOutput.
@Test
public void testMtInvokeOutput() throws Exception {
    // Thread-safe set: output messages may be added concurrently from container threads.
    outputMessages = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final int numInstances = 20;
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    assertNotNull(adaptor.dispatcher);
    for (int i = 0; i < numInstances; i++) {
        adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo" + i));
        // help the container when it has a limited queue
        Thread.yield();
    }
    // Every dispatched message must be accounted for: either it created an MP
    // instance or it was discarded. (Previously hard-coded as "> 19"; now tied
    // to numInstances so changing the constant keeps the test consistent.)
    assertTrue(poll(container, c -> (c.getProcessorCount() + ((ClusterMetricGetters) c.statCollector).getMessageDiscardedCount()) >= numInstances));
    Thread.sleep(100);
    final long messagesDiscarded = ((ClusterMetricGetters) container.statCollector).getMessageDiscardedCount();
    assertEquals("number of MP instances", numInstances, container.getProcessorCount() + messagesDiscarded);
    try (NodeManager nman = addOutputCatchStage()) {
        container.outputPass();
        // Each live MP should emit one output message during the output pass.
        assertTrue(poll(outputMessages, o -> (o.size() + messagesDiscarded) >= numInstances));
        Thread.sleep(100);
        assertEquals(numInstances, outputMessages.size() + messagesDiscarded);
    }
}
Use of net.dempsy.messages.Adaptor in the Dempsy project — class TestExplicitDestinations, method testSeparateNodes.
@Test
public void testSeparateNodes() throws Exception {
    // A single node definition hosting the adaptor and both MP clusters together.
    final String[][] paths = { { "explicit-destinations/adaptor.xml", "explicit-destinations/mp1.xml", "explicit-destinations/mp2.xml" } };
    runCombos("ted-one-node", paths, ns -> {
        reset();
        final NodeManager nodeManager = ns.nodes.get(0).manager;
        final ClassPathXmlApplicationContext appCtx = ns.nodes.get(0).ctx;
        // Block until the adaptor's router can actually see the downstream mp2 cluster.
        assertTrue(poll(o -> nodeManager.getRouter().allReachable("mp2-cluster").size() == 1));
        final ONeeGenerator generator = appCtx.getBean(ONeeGenerator.class);
        // Release the generator and wait for it to finish dispatching.
        generator.latch.countDown();
        assertTrue(poll(o -> generator.done.get()));
        // Exactly one unique key should have arrived at Mp1.
        assertEquals(1, Mp1.uniqueCounts.size());
    });
}
Use of net.dempsy.messages.Adaptor in the Dempsy project — class TestContainer, method testInvokeOutput.
@Test
public void testInvokeOutput() throws Exception {
    // Thread-safe set: output messages may arrive concurrently from container threads.
    outputMessages = Collections.newSetFromMap(new ConcurrentHashMap<>());
    cache = new ConcurrentHashMap<>();
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    assertNotNull(adaptor.dispatcher);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("bar"));
    // Wait until both messages are accounted for (processed or discarded).
    assertTrue(poll(container, c -> (c.getProcessorCount() + ((ClusterMetricGetters) c.statCollector).getMessageDiscardedCount()) > 1));
    Thread.sleep(100);
    assertEquals("number of MP instances", 2, container.getProcessorCount());
    try (NodeManager nman = addOutputCatchStage()) {
        final TestProcessor mp = cache.get("foo");
        assertTrue(poll(mp, m -> mp.invocationCount > 0));
        Thread.sleep(100);
        assertEquals("invocation count, 1st message", 1, mp.invocationCount);
        // because the sessionFactory is shared and the appname is the same, we should be in the same app
        container.outputPass();
        assertTrue(poll(outputMessages, o -> o.size() > 1));
        Thread.sleep(100);
        assertEquals(2, outputMessages.size());
        // no new mps created in the first one
        assertEquals("did not create MP", 2, container.getProcessorCount());
        // but the invocation count should have increased since the output cycle feeds messages back to this cluster
        assertTrue(poll(mp, m -> mp.invocationCount > 1));
        Thread.sleep(100);
        assertEquals("invocation count, 1st message", 2, mp.invocationCount);
        // Order of messages is not guaranteed, so aggregate keys from ALL output
        // messages rather than assuming exactly two iterator positions.
        final HashSet<String> messageKeys = new HashSet<>();
        for (final OutputMessage om : outputMessages)
            messageKeys.add(om.getKey());
        assertTrue("first MP sent output", messageKeys.contains("foo"));
        assertTrue("second MP sent output", messageKeys.contains("bar"));
    }
}
Use of net.dempsy.messages.Adaptor in the Dempsy project — class TestContainer, method testEvictCollisionWithBlocking.
@Test
public void testEvictCollisionWithBlocking() throws Throwable {
    final TestProcessor mp = createAndGet("foo");
    // now we're going to cause the passivate to be held up.
    mp.blockPassivate = new CountDownLatch(1);
    // allow eviction
    mp.evict.set(true);
    // Kick off the evict in a separate thread since we expect it to hang until
    // the mp becomes unstuck; the flag lets us observe when the pass completes.
    final AtomicBoolean evictIsComplete = new AtomicBoolean(false);
    final Thread thread = new Thread(() -> {
        container.evict();
        evictIsComplete.set(true);
    });
    thread.start();
    // let it get going.
    Thread.sleep(500);
    // check to see we're hung.
    assertFalse(evictIsComplete.get());
    final ClusterMetricGetters sc = (ClusterMetricGetters) statsCollector;
    assertEquals(0, sc.getMessageCollisionCount());
    // sending it a message will now cause it to have the collision tick up
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    // give it some time.
    Thread.sleep(100);
    // make sure there's no collision
    assertEquals(0, sc.getMessageCollisionCount());
    // make sure no message got handled
    // 1 is the initial invocation that caused the instantiation.
    assertEquals(1, mp.invocationCount);
    // now let the evict finish
    mp.blockPassivate.countDown();
    // wait until the eviction completes
    assertTrue(poll(evictIsComplete, AtomicBoolean::get));
    // Once the poll finishes a new Mp is instantiated and handling messages.
    assertTrue(poll(cache, c -> c.get("foo") != null));
    final TestProcessor mp2 = cache.get("foo");
    // BUGFIX: previously asserted on 'mp', which was trivially non-null (it had
    // already been dereferenced). The intent is to verify the cache lookup.
    assertNotNull("MP not associated with expected key", mp2);
    // invocationCount should be 1 from the initial invocation that caused the clone, and no more
    assertEquals(1, mp.invocationCount);
    assertEquals(1, mp2.invocationCount);
    assertTrue(mp != mp2);
    // send a message that should go through
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(o -> mp2.invocationCount > 1));
    Thread.sleep(100);
    assertEquals(1, mp.invocationCount);
    assertEquals(2, mp2.invocationCount);
}
Use of net.dempsy.messages.Adaptor in the Dempsy project — class NodeManager, method start.
/**
 * Starts this node: resolves the stats collectors, builds a {@link Container} for every
 * message-processor cluster, wires adaptors and routing strategies to the router,
 * registers the node's information in the cluster-info session, and finally starts the
 * receiver and the adaptors (adaptors only once the router reports ready).
 *
 * @return this {@code NodeManager} for call chaining.
 * @throws DempsyException if the node definition fails {@code validate()}.
 */
public NodeManager start() throws DempsyException {
    validate();
    nodeStatsCollector = tr.track((NodeStatsCollector) node.getNodeStatsCollector());
    // TODO: cleaner?
    statsCollectorFactory = tr.track(new Manager<>(ClusterStatsCollectorFactory.class).getAssociatedInstance(node.getClusterStatsCollectorFactoryId()));
    // =====================================
    // set the dispatcher on adaptors and create containers for mp clusters
    final AtomicReference<String> firstAdaptorClusterName = new AtomicReference<>(null);
    node.getClusters().forEach(c -> {
        if (c.isAdaptor()) {
            if (firstAdaptorClusterName.get() == null)
                firstAdaptorClusterName.set(c.getClusterId().clusterName);
            final Adaptor adaptor = c.getAdaptor();
            adaptors.put(c.getClusterId(), adaptor);
            // trim() can never yield " ", so the old extra check for a single-space
            // string was redundant; an empty-after-trim check is sufficient.
            if (c.getRoutingStrategyId() != null && !c.getRoutingStrategyId().trim().isEmpty())
                LOGGER.warn("The cluster " + c.getClusterId() + " contains an adaptor but also has the routingStrategy set. The routingStrategy will be ignored.");
            if (c.getOutputScheduler() != null)
                LOGGER.warn("The cluster " + c.getClusterId() + " contains an adaptor but also has an output executor set. The output executor will never be used.");
        } else {
            String containerTypeId = c.getContainerTypeId();
            if (containerTypeId == null)
                containerTypeId = node.getContainerTypeId(); // can't be null
            final Container con = makeContainer(containerTypeId).setMessageProcessor(c.getMessageProcessor()).setClusterId(c.getClusterId())
                .setMaxPendingMessagesPerContainer(c.getMaxPendingMessagesPerContainer());
            // TODO: This is a hack for now.
            final Manager<RoutingStrategy.Inbound> inboundManager = new RoutingInboundManager();
            final RoutingStrategy.Inbound is = inboundManager.getAssociatedInstance(c.getRoutingStrategyId());
            final boolean outputSupported = c.getMessageProcessor().isOutputSupported();
            final Object outputScheduler = c.getOutputScheduler();
            // if the mp handles output but there's no output scheduler then we should warn.
            if (outputSupported && outputScheduler == null)
                LOGGER.warn("The cluster " + c.getClusterId() + " contains a message processor that supports an output cycle but there's no executor set so it will never be invoked.");
            if (!outputSupported && outputScheduler != null)
                LOGGER.warn("The cluster " + c.getClusterId() + " contains a message processor that doesn't support an output cycle but there's an output cycle executor set. The output cycle executor will never be used.");
            final OutputScheduler os = (outputSupported && outputScheduler != null) ? (OutputScheduler) outputScheduler : null;
            containers.add(new PerContainer(con, is, c, os));
        }
    });
    // if we're all adaptor then don't bother to get the receiver.
    if (containers.size() == 0) {
        // here there's no point in a receiver since there's nothing to receive.
        if (firstAdaptorClusterName.get() == null)
            throw new IllegalStateException("There seems to be no clusters or adaptors defined for this node \"" + node.toString() + "\"");
    } else {
        receiver = (Receiver) node.getReceiver();
        if (receiver != null) // otherwise we're all adaptor
            nodeAddress = receiver.getAddress(this);
        else if (firstAdaptorClusterName.get() == null)
            throw new IllegalStateException("There seems to be no clusters or adaptors defined for this node \"" + node.toString() + "\"");
    }
    // without a receiver the node is identified by its first adaptor cluster's name.
    nodeId = Optional.ofNullable(nodeAddress).map(n -> n.getGuid()).orElse(firstAdaptorClusterName.get());
    if (nodeStatsCollector == null) {
        LOGGER.warn("There is no {} set for the the application '{}'", StatsCollector.class.getSimpleName(), node.application);
        nodeStatsCollector = new DummyNodeStatsCollector();
    }
    nodeStatsCollector.setNodeId(nodeId);
    if (nodeAddress == null && node.getReceiver() != null)
        LOGGER.warn("The node at " + nodeId + " contains no message processors but has a Reciever set. The receiver will never be started.");
    if (nodeAddress == null && node.getDefaultRoutingStrategyId() != null)
        LOGGER.warn("The node at " + nodeId + " contains no message processors but has a defaultRoutingStrategyId set. The routingStrategyId will never be used.");
    if (threading == null)
        threading = tr.track(new DefaultThreadingModel(nodeId)).configure(node.getConfiguration()).start(nodeId);
    else if (!threading.isStarted())
        threading.start(nodeId);
    nodeStatsCollector.setMessagesPendingGauge(() -> threading.getNumberLimitedPending());
    final NodeReceiver nodeReciever = receiver == null ? null
        : tr.track(new NodeReceiver(containers.stream().map(pc -> pc.container).collect(Collectors.toList()), threading, nodeStatsCollector));
    final Map<ClusterId, ClusterInformation> messageTypesByClusterId = new HashMap<>();
    containers.stream().map(pc -> pc.clusterDefinition).forEach(c -> {
        messageTypesByClusterId.put(c.getClusterId(), new ClusterInformation(c.getRoutingStrategyId(), c.getClusterId(), c.getMessageProcessor().messagesTypesHandled()));
    });
    final NodeInformation nodeInfo = nodeAddress != null ? new NodeInformation(receiver.transportTypeId(), nodeAddress, messageTypesByClusterId) : null;
    // Then actually register the Node
    if (nodeInfo != null) {
        keepNodeRegstered = new PersistentTask(LOGGER, isRunning, persistenceScheduler, RETRY_PERIOND_MILLIS) {
            @Override
            public boolean execute() {
                try {
                    session.recursiveMkdir(rootPaths.clustersDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                    session.recursiveMkdir(rootPaths.nodesDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                    final String nodePath = rootPaths.nodesDir + "/" + nodeId;
                    session.mkdir(nodePath, nodeInfo, DirMode.EPHEMERAL);
                    // re-read what landed to confirm our registration actually took.
                    final NodeInformation reread = (NodeInformation) session.getData(nodePath, this);
                    final boolean ret = nodeInfo.equals(reread);
                    if (ret)
                        ptaskReady.set(true);
                    return ret;
                } catch (final ClusterInfoException e) {
                    final String logmessage = "Failed to register the node. Retrying in " + RETRY_PERIOND_MILLIS + " milliseconds.";
                    // BUGFIX: both branches previously logged identically, making the
                    // level check dead code. Include the stack trace only when debug
                    // is enabled; otherwise log the message alone to reduce noise.
                    if (LOGGER.isDebugEnabled())
                        LOGGER.info(logmessage, e);
                    else
                        LOGGER.info(logmessage);
                }
                return false;
            }

            @Override
            public String toString() {
                return "register node information";
            }
        };
    }
    // =====================================
    // The layering works this way.
    //
    // Receiver -> NodeReceiver -> adaptor -> container -> OutgoingDispatcher -> RoutingStrategyOB -> Transport
    //
    // starting needs to happen in reverse.
    // =====================================
    isRunning.set(true);
    this.tManager = tr.start(new TransportManager(), this);
    this.rsManager = tr.start(new RoutingStrategyManager(), this);
    // create the router but don't start it yet.
    this.router = new OutgoingDispatcher(rsManager, nodeAddress, nodeId, nodeReciever, tManager, nodeStatsCollector);
    // set up containers
    containers.forEach(pc -> pc.container.setDispatcher(router).setEvictionCycle(pc.clusterDefinition.getEvictionFrequency().evictionFrequency,
        pc.clusterDefinition.getEvictionFrequency().evictionTimeUnit));
    // IB routing strategy: each container gets a distinct ContainerAddress keyed by its index.
    final int numContainers = containers.size();
    for (int i = 0; i < numContainers; i++) {
        final PerContainer c = containers.get(i);
        c.inboundStrategy.setContainerDetails(c.clusterDefinition.getClusterId(), new ContainerAddress(nodeAddress, i), c.container);
    }
    // setup the output executors by passing the containers
    containers.stream().filter(pc -> pc.outputScheduler != null).forEach(pc -> pc.outputScheduler.setOutputInvoker(pc.container));
    // set up adaptors
    adaptors.values().forEach(a -> a.setDispatcher(router));
    // start containers after setting inbound
    containers.forEach(pc -> tr.start(pc.container.setInbound(pc.inboundStrategy), this));
    // start the output schedulers now that the containers have been started.
    containers.stream().map(pc -> pc.outputScheduler).filter(os -> os != null).forEach(os -> tr.start(os, this));
    // start IB routing strategy
    containers.forEach(pc -> tr.start(pc.inboundStrategy, this));
    // start router
    tr.start(this.router, this);
    final PersistentTask startAdaptorAfterRouterIsRunning = new PersistentTask(LOGGER, isRunning, this.persistenceScheduler, 500) {
        @Override
        public boolean execute() {
            if (!router.isReady())
                return false;
            // each adaptor runs on its own daemon thread.
            adaptors.entrySet().forEach(e -> threading.runDaemon(() -> tr.track(e.getValue()).start(), "Adaptor-" + e.getKey().clusterName));
            return true;
        }
    };
    // make sure the router is running. Once it is, start the adaptor(s)
    startAdaptorAfterRouterIsRunning.process();
    if (receiver != null)
        tr.track(receiver).start(nodeReciever, this);
    // close this session when we're done.
    tr.track(session);
    // make it known we're here and ready
    if (keepNodeRegstered != null)
        keepNodeRegstered.process();
    else
        ptaskReady.set(true);
    return this;
}
Aggregations