Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpClientCodec in project wso2-synapse by wso2.
The class ChannelPipelineInitializer, method initializeHttpProducerChannel.
private void initializeHttpProducerChannel(SocketChannel ch) {
    ChannelPipeline pipeline = ch.pipeline();
    // Enable HTTPS if necessary.
    if (sslCtx != null) {
        pipeline.addLast(sslCtx.newHandler(ch.alloc()));
    }
    pipeline.addLast(new HttpClientCodec());
    pipeline.addLast(new HttpContentDecompressor());
    pipeline.addLast(new HttpResponseProcessHandler(producerOutgoingMessage));
}
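The snippet above only shows the initializer; as a minimal, self-contained sketch (not part of wso2-synapse), the same producer pipeline can be wired into a Netty Bootstrap like this. The host, port, class name, and response handler are illustrative, and plain io.netty package names are assumed here rather than the flink-shaded ones.

// Minimal sketch: bootstrap a client whose pipeline mirrors the producer channel above.
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.*;

public class ProducerChannelSketch {
    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            ChannelPipeline pipeline = ch.pipeline();
                            pipeline.addLast(new HttpClientCodec());          // encodes requests, decodes responses
                            pipeline.addLast(new HttpContentDecompressor());  // inflates gzip/deflate bodies
                            pipeline.addLast(new SimpleChannelInboundHandler<HttpObject>() {
                                @Override
                                protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
                                    if (msg instanceof HttpResponse) {
                                        System.out.println("Status: " + ((HttpResponse) msg).status());
                                    }
                                    if (msg instanceof LastHttpContent) {
                                        ctx.close();
                                    }
                                }
                            });
                        }
                    });
            Channel ch = b.connect("example.org", 80).sync().channel();
            HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
            request.headers().set(HttpHeaderNames.HOST, "example.org");
            ch.writeAndFlush(request);
            ch.closeFuture().sync();
        }
        finally {
            group.shutdownGracefully();
        }
    }
}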
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpClientCodec in project reactor-netty by reactor.
The class HttpClientConfig, method configureHttp11OrH2CleartextPipeline.
static void configureHttp11OrH2CleartextPipeline(ChannelPipeline p, boolean acceptGzip, HttpResponseDecoderSpec decoder,
        Http2Settings http2Settings, @Nullable ChannelMetricsRecorder metricsRecorder, ConnectionObserver observer,
        ChannelOperations.OnSetup opsFactory, @Nullable Function<String, String> uriTagValue) {
    HttpClientCodec httpClientCodec = new HttpClientCodec(
            decoder.maxInitialLineLength(), decoder.maxHeaderSize(), decoder.maxChunkSize(),
            decoder.failOnMissingResponse, decoder.validateHeaders(), decoder.initialBufferSize(),
            decoder.parseHttpAfterConnectRequest, decoder.allowDuplicateContentLengths());
    Http2FrameCodecBuilder http2FrameCodecBuilder = Http2FrameCodecBuilder.forClient()
            .validateHeaders(decoder.validateHeaders())
            .initialSettings(http2Settings);
    if (p.get(NettyPipeline.LoggingHandler) != null) {
        http2FrameCodecBuilder.frameLogger(new Http2FrameLogger(LogLevel.DEBUG, "reactor.netty.http.client.h2"));
    }
    Http2FrameCodec http2FrameCodec = http2FrameCodecBuilder.build();
    Http2ClientUpgradeCodec upgradeCodec = new Http2ClientUpgradeCodec(http2FrameCodec,
            new H2CleartextCodec(http2FrameCodec, opsFactory, acceptGzip));
    HttpClientUpgradeHandler upgradeHandler =
            new HttpClientUpgradeHandler(httpClientCodec, upgradeCodec, decoder.h2cMaxContentLength());
    p.addBefore(NettyPipeline.ReactiveBridge, null, httpClientCodec)
     .addBefore(NettyPipeline.ReactiveBridge, NettyPipeline.H2CUpgradeHandler, upgradeHandler)
     .addBefore(NettyPipeline.ReactiveBridge, NettyPipeline.HttpTrafficHandler, new HttpTrafficHandler(observer));
    if (acceptGzip) {
        p.addBefore(NettyPipeline.ReactiveBridge, NettyPipeline.HttpDecompressor, new HttpContentDecompressor());
    }
    if (metricsRecorder != null) {
        if (metricsRecorder instanceof HttpClientMetricsRecorder) {
            ChannelHandler handler;
            if (metricsRecorder instanceof MicrometerHttpClientMetricsRecorder) {
                handler = new MicrometerHttpClientMetricsHandler((MicrometerHttpClientMetricsRecorder) metricsRecorder, uriTagValue);
            }
            else if (metricsRecorder instanceof ContextAwareHttpClientMetricsRecorder) {
                handler = new ContextAwareHttpClientMetricsHandler((ContextAwareHttpClientMetricsRecorder) metricsRecorder, uriTagValue);
            }
            else {
                handler = new HttpClientMetricsHandler((HttpClientMetricsRecorder) metricsRecorder, uriTagValue);
            }
            p.addBefore(NettyPipeline.ReactiveBridge, NettyPipeline.HttpMetricsHandler, handler);
        }
    }
}
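From the public API side, this h2c-upgrade pipeline is what Reactor Netty builds when a cleartext client allows both HTTP/1.1 and H2C. The following usage sketch is an assumption (not taken from the reactor-netty sources), and the URL is a placeholder.

// Usage sketch: enable HTTP/1.1 with an upgrade to h2c on a cleartext connection.
import reactor.netty.http.HttpProtocol;
import reactor.netty.http.client.HttpClient;

public class H2CleartextClientSketch {
    public static void main(String[] args) {
        String body = HttpClient.create()
                .protocol(HttpProtocol.H2C, HttpProtocol.HTTP11)  // allow the HTTP/1.1 -> h2c upgrade
                .wiretap(true)                                    // log the upgrade exchange
                .get()
                .uri("http://example.org/")
                .responseContent()
                .aggregate()
                .asString()
                .block();
        System.out.println(body);
    }
}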
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpClientCodec in project reactor-netty by reactor.
The class HttpClientConfig, method configureHttp11Pipeline.
static void configureHttp11Pipeline(ChannelPipeline p, boolean acceptGzip, HttpResponseDecoderSpec decoder,
        @Nullable ChannelMetricsRecorder metricsRecorder, @Nullable Function<String, String> uriTagValue) {
    p.addBefore(NettyPipeline.ReactiveBridge, NettyPipeline.HttpCodec,
            new HttpClientCodec(
                    decoder.maxInitialLineLength(), decoder.maxHeaderSize(), decoder.maxChunkSize(),
                    decoder.failOnMissingResponse, decoder.validateHeaders(), decoder.initialBufferSize(),
                    decoder.parseHttpAfterConnectRequest, decoder.allowDuplicateContentLengths()));
    if (acceptGzip) {
        p.addAfter(NettyPipeline.HttpCodec, NettyPipeline.HttpDecompressor, new HttpContentDecompressor());
    }
    if (metricsRecorder != null) {
        if (metricsRecorder instanceof HttpClientMetricsRecorder) {
            ChannelHandler handler;
            if (metricsRecorder instanceof MicrometerHttpClientMetricsRecorder) {
                handler = new MicrometerHttpClientMetricsHandler((MicrometerHttpClientMetricsRecorder) metricsRecorder, uriTagValue);
            }
            else if (metricsRecorder instanceof ContextAwareHttpClientMetricsRecorder) {
                handler = new ContextAwareHttpClientMetricsHandler((ContextAwareHttpClientMetricsRecorder) metricsRecorder, uriTagValue);
            }
            else {
                handler = new HttpClientMetricsHandler((HttpClientMetricsRecorder) metricsRecorder, uriTagValue);
            }
            p.addBefore(NettyPipeline.ReactiveBridge, NettyPipeline.HttpMetricsHandler, handler);
        }
    }
}
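The decoder limits fed into the HttpClientCodec above come from HttpResponseDecoderSpec, which callers tune through HttpClient.httpResponseDecoder(...). The sketch below is an assumption about typical usage; the concrete sizes and URL are illustrative only.

// Usage sketch: tune the response decoder limits that end up in the HttpClientCodec.
import reactor.netty.http.client.HttpClient;

public class Http11DecoderTuningSketch {
    public static void main(String[] args) {
        HttpClient client = HttpClient.create()
                .httpResponseDecoder(spec -> spec.maxInitialLineLength(4096)
                                                 .maxHeaderSize(16384)
                                                 .maxChunkSize(16384)
                                                 .validateHeaders(true)
                                                 .allowDuplicateContentLengths(false));
        String body = client.get()
                .uri("http://example.org/")
                .responseContent()
                .aggregate()
                .asString()
                .block();
        System.out.println(body);
    }
}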
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpClientCodec in project reactor-netty by reactor.
The class HttpServerTests, method httpPipelining.
@Test
void httpPipelining() throws Exception {
    AtomicInteger i = new AtomicInteger();
    disposableServer = createServer()
            .handle((req, resp) -> resp.header(HttpHeaderNames.CONTENT_LENGTH, "1")
                                       .sendString(Mono.just(i.incrementAndGet())
                                                       .flatMap(d -> Mono.delay(Duration.ofSeconds(4 - d)).map(x -> d + "\n"))))
            .bindNow();
    DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/plaintext");
    CountDownLatch latch = new CountDownLatch(6);
    Connection client = TcpClient.create()
            .port(disposableServer.port())
            .handle((in, out) -> {
                in.withConnection(x -> x.addHandlerFirst(new HttpClientCodec()))
                  .receiveObject()
                  .ofType(DefaultHttpContent.class)
                  .as(ByteBufFlux::fromInbound)
                  .asString()
                  .log()
                  .map(Integer::parseInt)
                  .subscribe(d -> {
                      for (int x = 0; x < d; x++) {
                          latch.countDown();
                      }
                  });
                return out.sendObject(Flux.just(request.retain(), request.retain(), request.retain())).neverComplete();
            })
            .wiretap(true)
            .connectNow();
    assertThat(latch.await(45, TimeUnit.SECONDS)).as("latch await").isTrue();
    client.disposeNow();
}
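The test adds an HttpClientCodec as the first handler on a raw TCP connection so the pipelined GET requests are encoded to bytes and the responses are decoded back into HTTP objects. As a standalone illustration (an assumption, not part of the test above), Netty's EmbeddedChannel shows that encode/decode behaviour without any network; the request path and response bytes are placeholders.

// Sketch: what HttpClientCodec does to outbound requests and inbound response bytes.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.*;
import io.netty.util.CharsetUtil;

public class HttpClientCodecSketch {
    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel(new HttpClientCodec());

        // Outbound: a request object becomes raw HTTP/1.1 bytes.
        channel.writeOutbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/plaintext"));
        ByteBuf encoded = channel.readOutbound();
        System.out.println(encoded.toString(CharsetUtil.US_ASCII));
        encoded.release();

        // Inbound: raw response bytes become an HttpResponse followed by LastHttpContent.
        channel.writeInbound(Unpooled.copiedBuffer(
                "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\n1\n", CharsetUtil.US_ASCII));
        HttpResponse response = channel.readInbound();
        System.out.println(response.status());
        LastHttpContent content = channel.readInbound();
        content.release();
        channel.finishAndReleaseAll();
    }
}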
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpClientCodec in project reactor-netty by reactor.
The class HttpClientTest, method httpClientResponseConfigInjectAttributes.
@Test
void httpClientResponseConfigInjectAttributes() {
    AtomicReference<Channel> channelRef = new AtomicReference<>();
    AtomicBoolean validate = new AtomicBoolean();
    AtomicInteger chunkSize = new AtomicInteger();
    AtomicBoolean allowDuplicateContentLengths = new AtomicBoolean();
    disposableServer = createServer()
            .handle((req, resp) -> req.receive().then(resp.sendNotFound()))
            .bindNow();
    createHttpClientForContextWithAddress()
            .httpResponseDecoder(opt -> opt.maxInitialLineLength(123)
                                           .maxHeaderSize(456)
                                           .maxChunkSize(789)
                                           .validateHeaders(false)
                                           .initialBufferSize(10)
                                           .failOnMissingResponse(true)
                                           .parseHttpAfterConnectRequest(true)
                                           .allowDuplicateContentLengths(true))
            .doOnConnected(c -> {
                channelRef.set(c.channel());
                HttpClientCodec codec = c.channel().pipeline().get(HttpClientCodec.class);
                HttpObjectDecoder decoder = (HttpObjectDecoder) getValueReflection(codec, "inboundHandler", 1);
                chunkSize.set((Integer) getValueReflection(decoder, "maxChunkSize", 2));
                validate.set((Boolean) getValueReflection(decoder, "validateHeaders", 2));
                allowDuplicateContentLengths.set((Boolean) getValueReflection(decoder, "allowDuplicateContentLengths", 2));
            })
            .post()
            .uri("/")
            .send(ByteBufFlux.fromString(Mono.just("bodysample")))
            .responseContent()
            .aggregate()
            .asString()
            .block(Duration.ofSeconds(30));
    assertThat(channelRef.get()).isNotNull();
    assertThat(chunkSize).as("chunk size").hasValue(789);
    assertThat(validate).as("validate headers").isFalse();
    assertThat(allowDuplicateContentLengths).as("allow duplicate Content-Length").isTrue();
}
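The test verifies by reflection that the options set through httpResponseDecoder(...) reach the HttpClientCodec's internal decoder. As a sketch (an assumption, not part of the test), the same limits map onto the eight-argument HttpClientCodec constructor used in the pipeline configuration earlier in this page; the values mirror the test's 123/456/789 settings purely for illustration.

// Sketch: hand-building a codec with the same limits the test injects.
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpClientCodec;

public class TunedCodecInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        ch.pipeline().addLast(new HttpClientCodec(
                123,    // maxInitialLineLength
                456,    // maxHeaderSize
                789,    // maxChunkSize
                true,   // failOnMissingResponse
                false,  // validateHeaders
                10,     // initialBufferSize
                true,   // parseHttpAfterConnectRequest
                true)); // allowDuplicateContentLengths
    }
}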