Example usage of io.helidon.common.http.Http from the oracle/helidon project:
class MultiPortTest, method compositeFromConfig.
@Test
public void compositeFromConfig() throws Exception {
    // Socket/port layout comes from a YAML file on the classpath.
    Config config = Config.create(ConfigSources.classpath("multiport/application.yaml"));

    // Default socket serves the plain endpoint, the "secured" named socket serves its own.
    Routing plainRouting = Routing.builder()
            .get("/", (req, res) -> res.send("Plain!"))
            .build();
    Routing securedRouting = Routing.builder()
            .get("/", (req, res) -> res.send("Secured!"))
            .build();

    webServer = WebServer.builder()
            .host("localhost")
            .routing(plainRouting)
            .config(config.get("webserver"))
            .addNamedRouting("secured", securedRouting)
            .build();

    // Block until the server is fully started before asserting.
    webServer.start().toCompletableFuture().join();

    assertResponse("http", webServer.port(), "/", is("Plain!"));
    assertResponse("https", webServer.port("secured"), "/", is("Secured!"));
}
Example usage of io.helidon.common.http.Http from the oracle/helidon project:
class BareResponseImpl, method writeStatusAndHeaders.
@Override
public void writeStatusAndHeaders(Http.ResponseStatus status, Map<String, List<String>> headers) {
    // Fix: the original message said "Parameter 'statusCode' was null!" although the
    // parameter is named 'status' — the message now matches the actual parameter name.
    Objects.requireNonNull(status, "Parameter 'status' was null!");

    // Status and headers may be written exactly once per response.
    if (!statusHeadersSent.compareAndSet(false, true)) {
        throw new IllegalStateException("Status and headers were already sent");
    }

    // Translate the Helidon status into a Netty status, preserving a custom reason phrase
    // only when one was explicitly supplied (Http.Status instances use the default phrase).
    HttpResponseStatus nettyStatus;
    if (status instanceof Http.Status || status.reasonPhrase() == null) {
        // default reason phrase
        nettyStatus = valueOf(status.code());
    } else {
        // custom reason phrase
        nettyStatus = valueOf(status.code(), status.reasonPhrase());
    }
    response = new DefaultHttpResponse(HTTP_1_1, nettyStatus);

    for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) {
        response.headers().add(headerEntry.getKey(), headerEntry.getValue());
    }

    // Copy HTTP/2 headers to response for correlation (streamId)
    requestHeaders.names().stream()
            .filter(header -> header.startsWith(HTTP_2_HEADER_PREFIX))
            .forEach(header -> response.headers().add(header, requestHeaders.get(header)));

    // Check if WebSocket upgrade
    boolean isUpgrade = isWebSocketUpgrade(status, headers);
    if (isUpgrade) {
        isWebSocketUpgrade = true;
    } else {
        // Set chunked if length not set, may switch to length later
        boolean lengthSet = HttpUtil.isContentLengthSet(response);
        if (!lengthSet) {
            // Length optimization only applies to plain 200 responses that are not
            // already chunked and are not server-sent event streams.
            lengthOptimization = status.code() == Http.Status.OK_200.code()
                    && !HttpUtil.isTransferEncodingChunked(response)
                    && !isSseEventStream(headers);
            HttpUtil.setTransferEncodingChunked(response, true);
        }
    }

    // if response Connection header is set explicitly to close, we can ignore the following
    if (!keepAlive || HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(response.headers().get(HttpHeaderNames.CONNECTION))) {
        response.headers().remove(HttpHeaderNames.CONNECTION);
        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
    } else {
        if (!requestContext.requestCompleted()) {
            LOGGER.finer(() -> log("Request content not fully read with keep-alive: true", channel));
            if (!isWebSocketUpgrade) {
                if (requestContext.isDataRequested()) {
                    // there are pending requests, we have emitted some data and request was not explicitly canceled
                    // this is a bug in code, where entity is requested and not fully processed
                    // throwing an exception here is a breaking change (also this may be an intermittent problem
                    // as it may depend on thread race)
                    HttpRequest request = requestContext.request();
                    LOGGER.warning("Entity was requested and not fully consumed before a response is sent. "
                            + "This is not supported. Connection will be closed. Please fix your route for "
                            + request.method() + " " + request.uri());
                    // let's close this connection, as it is in an unexpected state
                    response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
                    originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
                } else {
                    // we want to consume the entity and keep alive
                    // entity must be consumed here, so we do not close connection in forwarding handler
                    // because of unconsumed payload (the following code will only succeed if there is no subscriber)
                    requestContext.publisher().forEach(DataChunk::release).onComplete(() -> {
                        response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
                        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE_ON_FAILURE);
                    }).onError(t -> {
                        response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
                        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
                    }).ignoreElement();
                }
            }
        } else if (!headers.containsKey(HttpHeaderNames.CONNECTION.toString())) {
            response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
        }
    }

    // Content length optimization attempt: when the optimization is off, flush the
    // status/headers as soon as the request entity has been analyzed.
    if (!lengthOptimization) {
        requestEntityAnalyzed = requestEntityAnalyzed.thenApply(listener -> {
            LOGGER.fine(() -> log("Writing headers %s", status));
            requestContext.runInScope(() -> orderedWrite(this::initWriteResponse));
            return listener;
        });
    }
}
End of extracted usage examples.