Usage of org.neo4j.cluster.com.NetworkSender in the neo4j/neo4j project.
Source: class NetworkSenderReceiverTest, method senderThatStartsAfterReceiverShouldEventuallyConnectSuccessfully.
@Test
public void senderThatStartsAfterReceiverShouldEventuallyConnectSuccessfully() throws Throwable {
/*
* This test verifies that a closed channel from a sender to a receiver is removed from the connections
* mapping in the sender. It starts a sender, connects it to a receiver and sends a message.
*
* We should be testing this without resorting to using a NetworkReceiver. But, as prophets Mick Jagger and
* Keith Richards mention in their scriptures, you can't always get what you want. In this case,
* NetworkSender creates on its own the things required to communicate with the outside world, and this
* means it creates actual sockets. To interact with it then, we need to setup listeners for those sockets
* and respond properly. Hence, NetworkReceiver. Yes, this means that this test requires to open actual
* network sockets.
*
* Read on for further hacks in place.
*/
NetworkSender sender = null;
NetworkReceiver receiver = null;
try {
LogProvider logProviderMock = mock(LogProvider.class);
Log logMock = mock(Log.class);
when(logProviderMock.getLog(Matchers.<Class>any())).thenReturn(logMock);
final Semaphore sem = new Semaphore(0);
/*
* A semaphore AND a boolean? Weird, you may think, as the purpose is clearly to step through the
* connection setup/teardown process. So, let's discuss what happens here more clearly.
*
* The sender and receiver are started. Trapped by the semaphore release on listeningAt()
* The sender sends through the first message, it is received by the receiver. Trapped by the semaphore
* release on listeningAt() which is triggered on the first message receive on the receiver
* The receiver is stopped, trapped by the overridden stop() method of the logging service.
* The sender sends a message through, which will trigger the ChannelClosedException. This is where it
* gets tricky. See, normally, since we waited for the semaphore on NetworkReceiver.stop() and an
* happensBefore edge exists and all these good things, it should be certain that the Receiver is
* actually stopped and the message would fail to be sent. That would be too easy though. In reality,
* netty will not wait for all listening threads to stop before returning, so the receiver is not
* guaranteed to not be listening for incoming connections when stop() returns. This happens rarely,
* but the result is that the message "HelloWorld2" should fail with an exception (triggering the warn
* method on the logger) but it doesn't. So we can't block, but we must retry until we know the
* message failed to be sent and the exception happened, which is what this test is all about. We do
* that with a boolean that is tested upon continuously with sent messages until the error happens.
* Then we proceed with...
* The receiver is started. Trapped by the listeningAt() callback.
* The sender sends a message.
* The receiver receives it, trapped by the dummy processor added to the receiver.
*/
final AtomicBoolean senderChannelClosed = new AtomicBoolean(false);
doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
senderChannelClosed.set(true);
return null;
}
}).when(logMock).warn(anyString());
receiver = new NetworkReceiver(mock(NetworkReceiver.Monitor.class), new NetworkReceiver.Configuration() {
@Override
public HostnamePort clusterServer() {
return new HostnamePort("127.0.0.1:1235");
}
@Override
public int defaultPort() {
return 5001;
}
@Override
public String name() {
return null;
}
}, NullLogProvider.getInstance()) {
@Override
public void stop() throws Throwable {
super.stop();
sem.release();
}
};
sender = new NetworkSender(mock(NetworkSender.Monitor.class), new NetworkSender.Configuration() {
@Override
public int port() {
return 1235;
}
@Override
public int defaultPort() {
return 5001;
}
}, receiver, logProviderMock);
sender.init();
sender.start();
receiver.addNetworkChannelsListener(new NetworkReceiver.NetworkChannelsListener() {
@Override
public void listeningAt(URI me) {
sem.release();
}
@Override
public void channelOpened(URI to) {
}
@Override
public void channelClosed(URI to) {
}
});
final AtomicBoolean received = new AtomicBoolean(false);
receiver.addMessageProcessor(new MessageProcessor() {
@Override
public boolean process(Message<? extends MessageType> message) {
received.set(true);
sem.release();
return true;
}
});
receiver.init();
receiver.start();
// wait for start from listeningAt() in the NetworkChannelsListener
sem.acquire();
sender.process(Message.to(TestMessage.helloWorld, URI.create("cluster://127.0.0.1:1235"), "Hello World"));
// wait for process from the MessageProcessor
sem.acquire();
receiver.stop();
// wait for overridden stop method in receiver
sem.acquire();
/*
* This is the infernal loop of doom. We keep sending messages until one fails with a ClosedChannelException
* which we have no better way to grab other than through the logger.warn() call which will occur.
*
* This code will hang if the warn we rely on is removed or if the receiver never stops - in general, if
* the closed channel exception is not thrown. This is not an ideal failure mode but it's the best we can
* do, given that NetworkSender is provided with very few things from its environment.
*/
while (!senderChannelClosed.get()) {
sender.process(Message.to(TestMessage.helloWorld, URI.create("cluster://127.0.0.1:1235"), "Hello World2"));
/*
* This sleep is not necessary, it's just nice. If it's ommitted, everything will work, but we'll
* spam messages over the network as fast as possible. Even when the race between send and
* receiver.stop() does not occur, we will still send 3-4 messages through at full speed. If it
* does occur, then we are looking at hundreds. So we just back off a bit and let things work out.
*/
Thread.sleep(5);
}
receiver.start();
// wait for receiver.listeningAt()
sem.acquire();
received.set(false);
sender.process(Message.to(TestMessage.helloWorld, URI.create("cluster://127.0.0.1:1235"), "Hello World3"));
// wait for receiver.process();
sem.acquire();
assertTrue(received.get());
} finally {
if (sender != null) {
sender.stop();
sender.shutdown();
}
if (receiver != null) {
receiver.stop();
receiver.shutdown();
}
}
}
Usage of org.neo4j.cluster.com.NetworkSender in the neo4j/neo4j project.
Source: class NetworkedServerFactory, method newNetworkedServer.
public ProtocolServer newNetworkedServer(final Config config, AcceptorInstanceStore acceptorInstanceStore, ElectionCredentialsProvider electionCredentialsProvider) {
final NetworkReceiver receiver = new NetworkReceiver(networkReceiverMonitor, new NetworkReceiver.Configuration() {
@Override
public HostnamePort clusterServer() {
return config.get(ClusterSettings.cluster_server);
}
@Override
public int defaultPort() {
return 5001;
}
@Override
public String name() {
return null;
}
}, logProvider);
final NetworkSender sender = new NetworkSender(networkSenderMonitor, new NetworkSender.Configuration() {
@Override
public int defaultPort() {
return 5001;
}
@Override
public int port() {
return config.get(ClusterSettings.cluster_server).getPort();
}
}, receiver, logProvider);
ExecutorLifecycleAdapter stateMachineExecutor = new ExecutorLifecycleAdapter(() -> Executors.newSingleThreadExecutor(new NamedThreadFactory("State machine", namedThreadFactoryMonitor)));
final ProtocolServer protocolServer = protocolServerFactory.newProtocolServer(config.get(ClusterSettings.server_id), timeoutStrategy, receiver, sender, acceptorInstanceStore, electionCredentialsProvider, stateMachineExecutor, objectInputStreamFactory, objectOutputStreamFactory, config);
receiver.addNetworkChannelsListener(new NetworkReceiver.NetworkChannelsListener() {
private StateTransitionLogger logger;
@Override
public void listeningAt(URI me) {
protocolServer.listeningAt(me);
if (logger == null) {
logger = new StateTransitionLogger(logProvider, new AtomicBroadcastSerializer(objectInputStreamFactory, objectOutputStreamFactory));
protocolServer.addStateTransitionListener(logger);
}
}
@Override
public void channelOpened(URI to) {
}
@Override
public void channelClosed(URI to) {
}
});
life.add(stateMachineExecutor);
// Timeout timer - triggers every 10 ms
life.add(new Lifecycle() {
private ScheduledExecutorService scheduler;
@Override
public void init() throws Throwable {
protocolServer.getTimeouts().tick(System.currentTimeMillis());
}
@Override
public void start() throws Throwable {
scheduler = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("timeout"));
scheduler.scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
long now = System.currentTimeMillis();
protocolServer.getTimeouts().tick(now);
}
}, 0, 10, TimeUnit.MILLISECONDS);
}
@Override
public void stop() throws Throwable {
scheduler.shutdownNow();
}
@Override
public void shutdown() throws Throwable {
}
});
// Add this last to ensure that timeout service is setup first
life.add(sender);
life.add(receiver);
return protocolServer;
}
Aggregations