Use of io.servicetalk.tcp.netty.internal.TcpServerChannelInitializer in project servicetalk by apple.
From the class HttpRequestEncoderTest, method protocolPayloadEndOutboundShouldNotTriggerOnFailedFlush:
@Test
void protocolPayloadEndOutboundShouldNotTriggerOnFailedFlush() throws Exception {
AtomicReference<CloseHandler> closeHandlerRef = new AtomicReference<>();
try (CompositeCloseable resources = newCompositeCloseable()) {
Processor serverCloseTrigger = newCompletableProcessor();
CountDownLatch serverChannelLatch = new CountDownLatch(1);
AtomicReference<Channel> serverChannelRef = new AtomicReference<>();
ReadOnlyTcpServerConfig sConfig = new TcpServerConfig().asReadOnly();
ServerContext serverContext = resources.prepend(TcpServerBinder.bind(localAddress(0), sConfig, false, SEC, null,
        (channel, observer) -> DefaultNettyConnection.initChannel(channel, SEC.bufferAllocator(), SEC.executor(),
                SEC.ioExecutor(), forPipelinedRequestResponse(false, channel.config()), defaultFlushStrategy(), null,
                new TcpServerChannelInitializer(sConfig, observer).andThen(channel2 -> {
                    serverChannelRef.compareAndSet(null, channel2);
                    serverChannelLatch.countDown();
                }), defaultStrategy(), mock(Protocol.class), observer, false, __ -> false),
        connection -> {
        }).toFuture().get());
ReadOnlyHttpClientConfig cConfig = new HttpClientConfig().asReadOnly();
assert cConfig.h1Config() != null;
NettyConnection<Object, Object> conn = resources.prepend(TcpConnector.connect(null, serverHostAndPort(serverContext),
        cConfig.tcpConfig(), false, CEC, (channel, connectionObserver) -> {
    CloseHandler closeHandler = spy(forPipelinedRequestResponse(true, channel.config()));
    closeHandlerRef.compareAndSet(null, closeHandler);
    return DefaultNettyConnection.initChannel(channel, CEC.bufferAllocator(), CEC.executor(), CEC.ioExecutor(),
            closeHandler, defaultFlushStrategy(), null,
            new TcpClientChannelInitializer(cConfig.tcpConfig(), connectionObserver)
                    .andThen(new HttpClientChannelInitializer(getByteBufAllocator(CEC.bufferAllocator()),
                            cConfig.h1Config(), closeHandler))
                    .andThen(channel2 -> channel2.pipeline().addLast(new ChannelInboundHandlerAdapter() {
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
// Propagate the user event in the pipeline before
// triggering the test condition.
ctx.fireUserEventTriggered(evt);
if (evt instanceof ChannelInputShutdownReadComplete) {
serverCloseTrigger.onComplete();
}
}
})), defaultStrategy(), HTTP_1_1, connectionObserver, true, __ -> false);
}, NoopTransportObserver.INSTANCE).toFuture().get());
// The server must wait until the client has established the connection before closing it.
serverChannelLatch.await();
Channel serverChannel = serverChannelRef.get();
assertNotNull(serverChannel);
assumeFalse(serverChannel instanceof NioSocketChannel, "Windows doesn't emit ChannelInputShutdownReadComplete. Investigation Required.");
((SocketChannel) serverChannel).config().setSoLinger(0);
// Close and send RST concurrently with client write
serverChannel.close();
StreamingHttpRequest request = reqRespFactory.post("/closeme");
fromSource(serverCloseTrigger).toFuture().get();
Completable write = conn.write(from(request, allocator.fromAscii("Bye"), EmptyHttpHeaders.INSTANCE));
assertThrows(ExecutionException.class, () -> write.toFuture().get());
CloseHandler closeHandler = closeHandlerRef.get();
assertNotNull(closeHandler);
verify(closeHandler, never()).protocolPayloadEndOutbound(any(), any());
}
}
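The core TcpServerChannelInitializer usage in the test above is the andThen composition that exposes the accepted server-side Channel to the test. Below is a minimal sketch of just that pattern, pulled out on its own; the class name, helper method, and import locations other than TcpServerChannelInitializer itself are illustrative assumptions, not part of the ServiceTalk test.
import io.netty.channel.Channel;
import io.servicetalk.tcp.netty.internal.ReadOnlyTcpServerConfig;
import io.servicetalk.tcp.netty.internal.TcpServerChannelInitializer;
import io.servicetalk.transport.api.ConnectionObserver;
import io.servicetalk.transport.netty.internal.ChannelInitializer;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

final class ServerChannelCapture {
    // Compose the ServiceTalk TCP initializer with an extra step that records the
    // accepted server-side Channel, mirroring the pattern used in the test above.
    static ChannelInitializer capturingInitializer(ReadOnlyTcpServerConfig config, ConnectionObserver observer,
                                                   AtomicReference<Channel> channelRef, CountDownLatch latch) {
        return new TcpServerChannelInitializer(config, observer).andThen(channel -> {
            channelRef.compareAndSet(null, channel);
            latch.countDown();
        });
    }
}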
Use of io.servicetalk.tcp.netty.internal.TcpServerChannelInitializer in project servicetalk by apple.
From the class FlushStrategyOnServerTest, method setUp:
private void setUp(final Param param) {
this.interceptor = new OutboundWriteEventsInterceptor();
this.headersFactory = DefaultHttpHeadersFactory.INSTANCE;
final StreamingHttpService service = (ctx, request, responseFactory) -> {
StreamingHttpResponse resp = responseFactory.ok();
if (request.headers().get(USE_EMPTY_RESP_BODY) == null) {
resp.payloadBody(from("Hello", "World"), appSerializerUtf8FixLen());
}
if (request.headers().get(USE_AGGREGATED_RESP) != null) {
return resp.toResponse().map(HttpResponse::toStreamingResponse);
}
return succeeded(resp);
};
final DefaultHttpExecutionContext httpExecutionContext = new DefaultHttpExecutionContext(DEFAULT_ALLOCATOR,
        globalExecutionContext().ioExecutor(), EXECUTOR_RULE.executor(), param.executionStrategy);
final ReadOnlyHttpServerConfig config = new HttpServerConfig().asReadOnly();
final ReadOnlyTcpServerConfig tcpReadOnly = new TcpServerConfig().asReadOnly();
try {
serverContext = TcpServerBinder.bind(localAddress(0), tcpReadOnly, true, httpExecutionContext, null,
        (channel, observer) -> {
            final ConnectionObserver connectionObserver = config.tcpConfig().transportObserver()
                    .onNewConnection(channel.localAddress(), channel.remoteAddress());
            return initChannel(channel, httpExecutionContext, config,
                    new TcpServerChannelInitializer(tcpReadOnly, connectionObserver)
                            .andThen(channel1 -> channel1.pipeline().addLast(interceptor)),
                    service, true, connectionObserver);
        }, connection -> connection.process(true))
        .map(delegate -> new NettyHttpServerContext(delegate, service, httpExecutionContext))
        .toFuture().get();
} catch (Exception e) {
fail(e);
}
client = HttpClients.forSingleAddress(serverHostAndPort(serverContext)).protocols(h1Default()).buildBlocking();
}
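For context, the blocking client built at the end of setUp is what the individual tests drive against the server so that the OutboundWriteEventsInterceptor can observe writes and flushes. A minimal usage sketch follows; it assumes the client field and the USE_AGGREGATED_RESP header constant from this test class, and is not code from the test itself.
// Drive the server configured above; `client`, `USE_AGGREGATED_RESP`, HttpRequest and
// HttpResponse (io.servicetalk.http.api) come from the surrounding test class and its imports.
HttpRequest request = client.get("/");
request.headers().set(USE_AGGREGATED_RESP, "true"); // take the aggregated-response branch of the service
HttpResponse response = client.request(request);
// The interceptor installed in the channel pipeline has now recorded the outbound
// write and flush events that the real tests assert on.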