Use of net.dempsy.threading.DefaultThreadingModel in project Dempsy by Dempsy.
The class BlockingQueueTest, method testBlockingQueueOverflow.
/**
* Test overflow for a blocking Transport around a queue with depth one. While the transport will not call the overflow handler (and does not even have
* one), every message will call the overflow handler on the receiver since the queue is always full.
*
* @throws Throwable
*/
@Test
public void testBlockingQueueOverflow() throws Throwable {
final AtomicReference<String> message = new AtomicReference<String>(null);
final ArrayBlockingQueue<Object> input = new ArrayBlockingQueue<>(1);
try (@SuppressWarnings("resource") SystemPropertyManager // test only works when the blocking queue blocks
props = new SystemPropertyManager().set(BlockingQueueSenderFactory.class.getPackageName() + "." + BlockingQueueSenderFactory.BLOCKING_KEY, "true");
final TestInfrastructure infra = new TestInfrastructure(new DefaultThreadingModel("BQTest-testBlockingQueueOverflow-"));
final Receiver r = new BlockingQueueReceiver(input);
final TransportManager tranMan = chain(new TransportManager(), c -> c.start(infra));
final SenderFactory sf = tranMan.getAssociatedInstance(transportTypeId)) {
final Sender sender = sf.getSender(r.getAddress(infra));
final AtomicBoolean finallySent = new AtomicBoolean(false);
final AtomicLong receiveCount = new AtomicLong();
// fill up queue
sender.send("Hello");
final Thread t = new Thread(() -> {
try {
sender.send("Hello again");
} catch (final MessageTransportException | InterruptedException e) {
throw new RuntimeException(e);
}
finallySent.set(true);
});
t.start();
Thread.sleep(100);
// the thread should be hung blocked on the send
assertFalse(finallySent.get());
// Start the receiver to read
r.start((final String msg) -> {
message.set(new String(msg));
receiveCount.incrementAndGet();
return true;
}, infra);
// 2 messages should have been read and the 2nd should be "Hello again"
assertTrue(poll(o -> "Hello again".equals(message.get())));
// The thread should shut down eventually
assertTrue(poll(o -> !t.isAlive()));
}
}
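The behavior this test depends on, a producer blocking on a full depth-one queue until the consumer drains it, can be reproduced with the JDK alone. The following is a minimal sketch using only java.util.concurrent; the class and variable names are illustrative and nothing in it is Dempsy API.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class BlockingDepthOneSketch {
    public static void main(final String[] args) throws Exception {
        final ArrayBlockingQueue<String> queue = new ArrayBlockingQueue<>(1);
        final AtomicBoolean secondSendCompleted = new AtomicBoolean(false);

        queue.put("Hello"); // fills the queue; capacity is 1

        final Thread sender = new Thread(() -> {
            try {
                queue.put("Hello again"); // blocks until the consumer takes the first message
                secondSendCompleted.set(true);
            } catch(final InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        });
        sender.start();

        Thread.sleep(100);
        System.out.println("second send completed? " + secondSendCompleted.get()); // expected: false

        System.out.println("received: " + queue.take()); // "Hello"; this unblocks the sender thread
        System.out.println("received: " + queue.take()); // "Hello again"
        sender.join();
        System.out.println("second send completed? " + secondSendCompleted.get()); // expected: true
    }
}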
Use of net.dempsy.threading.DefaultThreadingModel in project Dempsy by Dempsy.
The class TcpTransportTest, method testConnectionRecovery.
@Test
public void testConnectionRecovery() throws Exception {
try (final ServiceTracker tr = new ServiceTracker()) {
final AbstractTcpReceiver<?, ?> r = tr.track(receiver.get()).numHandlers(2).useLocalHost(true);
// can't test connection recovery here.
if (!(r instanceof DisruptableRecevier))
return;
final ThreadingModel tm = tr.track(new DefaultThreadingModel(TcpTransportTest.class.getSimpleName() + ".testConnectionRecovery"));
final Infrastructure infra = tr.track(new TestInfrastructure(tm));
final TcpAddress addr = r.getAddress(infra);
LOGGER.debug(addr.toString());
final AtomicReference<RoutedMessage> rm = new AtomicReference<>(null);
r.start((Listener<RoutedMessage>) msg -> {
rm.set(msg);
return true;
}, infra);
try (final SenderFactory sf = senderFactory.get()) {
sf.start(new TestInfrastructure(tm) {
@Override
public String getNodeId() {
return "test";
}
});
final Sender sender = sf.getSender(addr);
sender.send(new RoutedMessage(new int[] { 0 }, "Hello", "Hello"));
assertTrue(poll(o -> rm.get() != null));
assertEquals("Hello", rm.get().message);
assertTrue(((DisruptableRecevier) r).disrupt(addr));
final AtomicBoolean stop = new AtomicBoolean(false);
final RoutedMessage resetMessage = new RoutedMessage(new int[] { 0 }, "RESET", "RESET");
final Thread senderThread = new Thread(() -> {
try {
while (!stop.get()) {
sender.send(resetMessage);
dontInterrupt(() -> Thread.sleep(100));
}
} catch (final InterruptedException ie) {
if (!stop.get())
LOGGER.error("Interrupted send");
}
}, "testConnectionRecovery-sender");
senderThread.start();
try {
assertTrue(poll(o -> "RESET".equals(rm.get().message)));
} finally {
stop.set(true);
if (!poll(senderThread, t -> {
dontInterrupt(() -> t.join(10000));
return !t.isAlive();
}))
LOGGER.error("FAILED TO SHUT DOWN TEST SENDING THREAD. THREAD LEAK!");
}
}
}
}
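Both TcpTransportTest methods, like the blocking queue test above, assert on conditions through a poll(...) test utility that retries a predicate until it holds or a timeout expires. The actual Dempsy helper is not reproduced on this page; the sketch below is a hypothetical stand-in that only illustrates the contract these assertions assume (the class name and the 10 second timeout are assumptions, not the real implementation).

import java.util.function.Predicate;

public final class ConditionPollSketch {
    // Hypothetical stand-in for poll(context, condition): retry until true or the deadline passes.
    public static <T> boolean poll(final T context, final Predicate<T> condition) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 10_000; // assumed 10 second timeout
        while(System.currentTimeMillis() < deadline) {
            if(condition.test(context))
                return true;
            Thread.sleep(10);
        }
        return condition.test(context); // one last check after the deadline
    }

    // Single-argument form, as in poll(o -> ...): the context is simply ignored.
    public static boolean poll(final Predicate<Object> condition) throws InterruptedException {
        return poll(null, condition);
    }
}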
Use of net.dempsy.threading.DefaultThreadingModel in project Dempsy by Dempsy.
The class TcpTransportTest, method testMessage.
@Test
public void testMessage() throws Exception {
try (ServiceTracker tr = new ServiceTracker()) {
final AbstractTcpReceiver<?, ?> r = tr.track(receiver.get()).numHandlers(2).useLocalHost(true);
final ThreadingModel tm = tr.track(new DefaultThreadingModel(TcpTransportTest.class.getSimpleName() + ".testMessage"));
final Infrastructure infra = tr.track(new TestInfrastructure(tm));
final TcpAddress addr = r.getAddress(infra);
LOGGER.debug(addr.toString());
final AtomicReference<RoutedMessage> rm = new AtomicReference<>(null);
r.start((Listener<RoutedMessage>) msg -> {
rm.set(msg);
return true;
}, infra);
try (final SenderFactory sf = senderFactory.get()) {
sf.start(new TestInfrastructure(tm) {
@Override
public String getNodeId() {
return "test";
}
});
final Sender sender = sf.getSender(addr);
sender.send(new RoutedMessage(new int[] { 0 }, "Hello", "Hello"));
assertTrue(poll(o -> rm.get() != null));
assertEquals("Hello", rm.get().message);
}
}
}
Use of net.dempsy.threading.DefaultThreadingModel in project Dempsy by Dempsy.
The class TestInstanceManager, method setupContainer.
@SuppressWarnings("resource")
public Container setupContainer(final MessageProcessorLifecycle<?> prototype) throws ContainerException {
dispatcher = new DummyDispatcher();
statsCollector = new BasicClusterStatsCollector();
manager = new NonLockingAltContainer().setMessageProcessor(prototype).setClusterId(new ClusterId("test", "test"));
manager.setDispatcher(dispatcher);
manager.setInbound(new DummyInbound());
tm = new DefaultThreadingModel(TestInstanceManager.class.getName());
tm.start(TestInstanceManager.class.getName());
manager.start(new TestInfrastructure(tm) {
BasicNodeStatsCollector nStats = new BasicNodeStatsCollector();
@Override
public ClusterStatsCollector getClusterStatsCollector(final ClusterId clusterId) {
return statsCollector;
}
@Override
public NodeStatsCollector getNodeStatsCollector() {
return nStats;
}
});
return manager;
}
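Across these examples DefaultThreadingModel follows the same small lifecycle: construct it with a base name, start it with a node id, and hand work to it (directly or through the components that wrap it). The fragment below collects only calls that appear elsewhere on this page; the names passed in are placeholders, and shutdown is omitted because the tests delegate it to a ServiceTracker via tr.track(...).

import net.dempsy.threading.DefaultThreadingModel;

public class ThreadingModelLifecycleSketch {
    public static void main(final String[] args) {
        // construct with a base name, as the tests above do
        final DefaultThreadingModel tm = new DefaultThreadingModel("Example-ThreadingModel-");
        // start with a node id, as in TestInstanceManager.setupContainer and NodeManager.start
        tm.start("example-node");
        // hand off daemon work, as NodeManager.start does when launching adaptors
        tm.runDaemon(() -> System.out.println("daemon work"), "Example-daemon");
        // shutdown is handled by a ServiceTracker (tr.track(tm)) in the tests above,
        // so no explicit stop call is shown here.
    }
}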
Use of net.dempsy.threading.DefaultThreadingModel in project Dempsy by Dempsy.
The class NodeManager, method start.
public NodeManager start() throws DempsyException {
validate();
nodeStatsCollector = tr.track((NodeStatsCollector) node.getNodeStatsCollector());
// TODO: cleaner?
statsCollectorFactory = tr.track(new Manager<>(ClusterStatsCollectorFactory.class).getAssociatedInstance(node.getClusterStatsCollectorFactoryId()));
// =====================================
// set the dispatcher on adaptors and create containers for mp clusters
final AtomicReference<String> firstAdaptorClusterName = new AtomicReference<>(null);
node.getClusters().forEach(c -> {
if (c.isAdaptor()) {
if (firstAdaptorClusterName.get() == null)
firstAdaptorClusterName.set(c.getClusterId().clusterName);
final Adaptor adaptor = c.getAdaptor();
adaptors.put(c.getClusterId(), adaptor);
if (c.getRoutingStrategyId() != null && !"".equals(c.getRoutingStrategyId().trim()) && !" ".equals(c.getRoutingStrategyId().trim()))
LOGGER.warn("The cluster " + c.getClusterId() + " contains an adaptor but also has the routingStrategy set. The routingStrategy will be ignored.");
if (c.getOutputScheduler() != null)
LOGGER.warn("The cluster " + c.getClusterId() + " contains an adaptor but also has an output executor set. The output executor will never be used.");
} else {
String containerTypeId = c.getContainerTypeId();
if (containerTypeId == null)
// can't be null
containerTypeId = node.getContainerTypeId();
final Container con = makeContainer(containerTypeId).setMessageProcessor(c.getMessageProcessor()).setClusterId(c.getClusterId()).setMaxPendingMessagesPerContainer(c.getMaxPendingMessagesPerContainer());
// TODO: This is a hack for now.
final Manager<RoutingStrategy.Inbound> inboundManager = new RoutingInboundManager();
final RoutingStrategy.Inbound is = inboundManager.getAssociatedInstance(c.getRoutingStrategyId());
final boolean outputSupported = c.getMessageProcessor().isOutputSupported();
final Object outputScheduler = c.getOutputScheduler();
// if the mp handles output but there's no output scheduler then we should warn.
if (outputSupported && outputScheduler == null)
LOGGER.warn("The cluster " + c.getClusterId() + " contains a message processor that supports an output cycle but there's no executor set so it will never be invoked.");
if (!outputSupported && outputScheduler != null)
LOGGER.warn("The cluster " + c.getClusterId() + " contains a message processor that doesn't support an output cycle but there's an output cycle executor set. The output cycle executor will never be used.");
final OutputScheduler os = (outputSupported && outputScheduler != null) ? (OutputScheduler) outputScheduler : null;
containers.add(new PerContainer(con, is, c, os));
}
});
// if we're all adaptor then don't bother to get the receiver.
if (containers.size() == 0) {
// here there's no point in a receiver since there's nothing to receive.
if (firstAdaptorClusterName.get() == null)
throw new IllegalStateException("There seems to be no clusters or adaptors defined for this node \"" + node.toString() + "\"");
} else {
receiver = (Receiver) node.getReceiver();
if (receiver != null) // otherwise we're all adaptor
nodeAddress = receiver.getAddress(this);
else if (firstAdaptorClusterName.get() == null)
throw new IllegalStateException("There seems to be no clusters or adaptors defined for this node \"" + node.toString() + "\"");
}
nodeId = Optional.ofNullable(nodeAddress).map(n -> n.getGuid()).orElse(firstAdaptorClusterName.get());
if (nodeStatsCollector == null) {
LOGGER.warn("There is no {} set for the the application '{}'", StatsCollector.class.getSimpleName(), node.application);
nodeStatsCollector = new DummyNodeStatsCollector();
}
nodeStatsCollector.setNodeId(nodeId);
if (nodeAddress == null && node.getReceiver() != null)
LOGGER.warn("The node at " + nodeId + " contains no message processors but has a Reciever set. The receiver will never be started.");
if (nodeAddress == null && node.getDefaultRoutingStrategyId() != null)
LOGGER.warn("The node at " + nodeId + " contains no message processors but has a defaultRoutingStrategyId set. The routingStrategyId will never be used.");
if (threading == null)
threading = tr.track(new DefaultThreadingModel(nodeId)).configure(node.getConfiguration()).start(nodeId);
else if (!threading.isStarted())
threading.start(nodeId);
nodeStatsCollector.setMessagesPendingGauge(() -> threading.getNumberLimitedPending());
final NodeReceiver nodeReciever = receiver == null ? null : tr.track(new NodeReceiver(containers.stream().map(pc -> pc.container).collect(Collectors.toList()), threading, nodeStatsCollector));
final Map<ClusterId, ClusterInformation> messageTypesByClusterId = new HashMap<>();
containers.stream().map(pc -> pc.clusterDefinition).forEach(c -> {
messageTypesByClusterId.put(c.getClusterId(), new ClusterInformation(c.getRoutingStrategyId(), c.getClusterId(), c.getMessageProcessor().messagesTypesHandled()));
});
final NodeInformation nodeInfo = nodeAddress != null ? new NodeInformation(receiver.transportTypeId(), nodeAddress, messageTypesByClusterId) : null;
// Then actually register the Node
if (nodeInfo != null) {
keepNodeRegstered = new PersistentTask(LOGGER, isRunning, persistenceScheduler, RETRY_PERIOND_MILLIS) {
@Override
public boolean execute() {
try {
session.recursiveMkdir(rootPaths.clustersDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
session.recursiveMkdir(rootPaths.nodesDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
final String nodePath = rootPaths.nodesDir + "/" + nodeId;
session.mkdir(nodePath, nodeInfo, DirMode.EPHEMERAL);
final NodeInformation reread = (NodeInformation) session.getData(nodePath, this);
final boolean ret = nodeInfo.equals(reread);
if (ret == true)
ptaskReady.set(true);
return ret;
} catch (final ClusterInfoException e) {
final String logmessage = "Failed to register the node. Retrying in " + RETRY_PERIOND_MILLIS + " milliseconds.";
if (LOGGER.isDebugEnabled())
LOGGER.info(logmessage, e);
else
LOGGER.info(logmessage, e);
}
return false;
}
@Override
public String toString() {
return "register node information";
}
};
}
// =====================================
// The layering works this way.
//
// Receiver -> NodeReceiver -> adaptor -> container -> OutgoingDispatcher -> RoutingStrategyOB -> Transport
//
// starting needs to happen in reverse.
// =====================================
isRunning.set(true);
this.tManager = tr.start(new TransportManager(), this);
this.rsManager = tr.start(new RoutingStrategyManager(), this);
// create the router but don't start it yet.
this.router = new OutgoingDispatcher(rsManager, nodeAddress, nodeId, nodeReciever, tManager, nodeStatsCollector);
// set up containers
containers.forEach(pc -> pc.container.setDispatcher(router).setEvictionCycle(pc.clusterDefinition.getEvictionFrequency().evictionFrequency, pc.clusterDefinition.getEvictionFrequency().evictionTimeUnit));
// IB routing strategy
final int numContainers = containers.size();
for (int i = 0; i < numContainers; i++) {
final PerContainer c = containers.get(i);
c.inboundStrategy.setContainerDetails(c.clusterDefinition.getClusterId(), new ContainerAddress(nodeAddress, i), c.container);
}
// setup the output executors by passing the containers
containers.stream().filter(pc -> pc.outputScheduler != null).forEach(pc -> pc.outputScheduler.setOutputInvoker(pc.container));
// set up adaptors
adaptors.values().forEach(a -> a.setDispatcher(router));
// start containers after setting inbound
containers.forEach(pc -> tr.start(pc.container.setInbound(pc.inboundStrategy), this));
// start the output schedulers now that the containers have been started.
containers.stream().map(pc -> pc.outputScheduler).filter(os -> os != null).forEach(os -> tr.start(os, this));
// start IB routing strategy
containers.forEach(pc -> tr.start(pc.inboundStrategy, this));
// start router
tr.start(this.router, this);
final PersistentTask startAdaptorAfterRouterIsRunning = new PersistentTask(LOGGER, isRunning, this.persistenceScheduler, 500) {
@Override
public boolean execute() {
if (!router.isReady())
return false;
adaptors.entrySet().forEach(e -> threading.runDaemon(() -> tr.track(e.getValue()).start(), "Adaptor-" + e.getKey().clusterName));
return true;
}
};
// make sure the router is running. Once it is, start the adaptor(s)
startAdaptorAfterRouterIsRunning.process();
if (receiver != null)
tr.track(receiver).start(nodeReciever, this);
// close this session when we're done.
tr.track(session);
// make it known we're here and ready
if (keepNodeRegstered != null)
keepNodeRegstered.process();
else
ptaskReady.set(true);
return this;
}
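Both the node registration block and the adaptor startup above rely on PersistentTask: execute() is retried on a scheduler until it reports success, for as long as the node is running. The Dempsy class itself is not shown on this page, so the following is a hypothetical, JDK-only stand-in for that retry pattern; every name in it is invented for illustration.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;

public final class RetryUntilDoneSketch {
    // Run the step once; if it reports failure, reschedule it after retryMillis
    // for as long as the owning component is still running.
    public static void process(final ScheduledExecutorService scheduler, final AtomicBoolean isRunning,
            final long retryMillis, final BooleanSupplier step) {
        if(!isRunning.get())
            return;
        if(!step.getAsBoolean()) // analogous to execute(); false means "not done yet, try again later"
            scheduler.schedule(() -> process(scheduler, isRunning, retryMillis, step), retryMillis, TimeUnit.MILLISECONDS);
    }

    public static void main(final String[] args) throws Exception {
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        final AtomicBoolean isRunning = new AtomicBoolean(true);
        final long start = System.currentTimeMillis();
        // "succeeds" only after half a second, analogous to waiting for the router to become ready
        process(scheduler, isRunning, 100, () -> System.currentTimeMillis() - start > 500);
        Thread.sleep(1000);
        isRunning.set(false);
        scheduler.shutdown();
    }
}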