Use of io.netty.handler.stream.ChunkedStream in project async-http-client by AsyncHttpClient:
the write method of the NettyInputStreamBody class.
/**
 * Writes the wrapped {@link InputStream} to the channel as a chunked HTTP body.
 * <p>
 * On a re-entry (the future reports the stream was already consumed, e.g. after a
 * redirect or an auth challenge) the stream is replayed via {@code reset()} when
 * mark/reset is supported; otherwise the write is abandoned with a warning.
 * The stream is closed once the chunked transfer completes, successfully or not.
 *
 * @param channel the channel to write the body to
 * @param future  the request future tracking stream-consumption state and progress
 * @throws IOException if resetting the stream fails
 */
@Override
public void write(Channel channel, NettyResponseFuture<?> future) throws IOException {
  final InputStream is = inputStream;

  if (future.isStreamConsumed()) {
    if (is.markSupported()) {
      is.reset();
    } else {
      LOGGER.warn("Stream has already been consumed and cannot be reset");
      return;
    }
  } else {
    future.setStreamConsumed(true);
  }

  // Stream the body in chunks; the listener closes the stream when the transfer
  // finishes (whatever the outcome) before delegating progress notification.
  channel.write(new ChunkedStream(is), channel.newProgressivePromise())
      .addListener(new WriteProgressListener(future, false, getContentLength()) {
        @Override // fix: the anonymous override was missing @Override
        public void operationComplete(ChannelProgressiveFuture cf) {
          closeSilently(is);
          super.operationComplete(cf);
        }
      });
  // Terminate the chunked HTTP message.
  channel.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT, channel.voidPromise());
}
Use of io.netty.handler.stream.ChunkedStream in project intellij-plugins by JetBrains:
the sendResource method of the PreviewStaticServer class.
// Streams a classpath resource to the client as a chunked HTTP response and then
// closes the connection. Sends 404 when the resource does not exist.
private static void sendResource(@NotNull HttpRequest request, @NotNull Channel channel, @NotNull Class<?> clazz, @NotNull String resourceName) {
// Last path segment is used as the advertised file name (content type, caching).
final String fileName = resourceName.substring(resourceName.lastIndexOf('/') + 1);
final HttpResponse response = FileResponses.INSTANCE.prepareSend(request, channel, 0, fileName, EmptyHttpHeaders.INSTANCE);
if (response == null) {
// prepareSend returned null — presumably it already answered the request
// (e.g. a not-modified short-circuit); verify against FileResponses.
return;
}
Responses.addKeepAliveIfNeed(response, request);
try (final InputStream resource = clazz.getResourceAsStream(resourceName)) {
if (resource == null) {
Responses.send(HttpResponseStatus.NOT_FOUND, channel, request);
return;
}
channel.write(response);
// HEAD requests get headers only, no body.
if (request.method() != HttpMethod.HEAD) {
// NOTE(review): try-with-resources closes `resource` when this block exits,
// but the flush that drives ChunkedStream happens below, after the try.
// This looks like a read-after-close unless write() here drains the chunk
// synchronously — confirm against the pipeline's ChunkedWriteHandler setup.
channel.write(new ChunkedStream(resource));
}
} catch (IOException ignored) {
// NOTE(review): the failure is swallowed by design (best-effort static serving),
// but the client may be left with a truncated response; consider logging.
}
// End the chunked message and close the connection once the last content is flushed.
final ChannelFuture future = channel.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
future.addListener(ChannelFutureListener.CLOSE);
}
Use of io.netty.handler.stream.ChunkedStream in project intellij-plugins by JetBrains:
the sendInlineStyle method of the PreviewStaticServer class.
// Serves the in-memory inline stylesheet (myInlineStyle) as a chunked HTTP
// response, then closes the connection once the last content is flushed.
private void sendInlineStyle(@NotNull HttpRequest request, @NotNull Channel channel) {
  final HttpResponse headerResponse =
    FileResponses.INSTANCE.prepareSend(request, channel, myInlineStyleTimestamp, INLINE_CSS_FILENAME, EmptyHttpHeaders.INSTANCE);
  if (headerResponse == null) {
    // prepareSend returned null — presumably the request was already answered
    // (e.g. not-modified); verify against FileResponses.
    return;
  }

  Responses.addKeepAliveIfNeed(headerResponse, request);

  if (myInlineStyle == null) {
    // Nothing to serve: no inline style has been registered.
    Responses.send(HttpResponseStatus.NOT_FOUND, channel, request);
    return;
  }

  channel.write(headerResponse);

  // HEAD requests receive headers only.
  final boolean wantsBody = request.method() != HttpMethod.HEAD;
  if (wantsBody) {
    final byte[] cssBytes = myInlineStyle.getBytes(CharsetToolkit.UTF8_CHARSET);
    channel.write(new ChunkedStream(new ByteArrayInputStream(cssBytes)));
  }

  channel.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT).addListener(ChannelFutureListener.CLOSE);
}
Use of io.netty.handler.stream.ChunkedStream in project hadoop by Apache:
the onOpen method of the WebHdfsHandler class.
/**
 * Handles a WebHDFS OPEN request: opens the file at {@code path}, seeks to the
 * requested offset, and streams at most {@code length} bytes back to the client
 * as a chunked octet-stream response, closing the connection afterwards.
 * <p>
 * Ownership of the DFS resources transfers to the {@link ChunkedStream} on a
 * successful handoff; on any earlier failure (open/seek/length probe) this method
 * now closes the stream and client itself before rethrowing, fixing a leak where
 * an exception thrown before the handoff left both unclosed.
 *
 * @param ctx the channel context to write the response to
 * @throws IOException if opening, seeking, or probing the file fails
 */
private void onOpen(ChannelHandlerContext ctx) throws IOException {
  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  final long offset = params.offset();
  final long length = params.length();

  resp = new DefaultHttpResponse(HTTP_1_1, OK);
  HttpHeaders headers = resp.headers();
  // Allow the UI to access the file
  headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
  headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
  headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
  headers.set(CONNECTION, CLOSE);

  final DFSClient dfsclient = newDfsClient(nnId, conf);
  HdfsDataInputStream in = null;
  try {
    in = dfsclient.createWrappedInputStream(dfsclient.open(path, bufferSize, true));
    in.seek(offset);

    // Bytes remaining from the seek position, optionally capped by the
    // client-requested length.
    long contentLength = in.getVisibleLength() - offset;
    if (length >= 0) {
      contentLength = Math.min(contentLength, length);
    }
    final InputStream data;
    if (contentLength >= 0) {
      headers.set(CONTENT_LENGTH, contentLength);
      data = new LimitInputStream(in, contentLength);
    } else {
      data = in;
    }

    ctx.write(resp);
    // From here the ChunkedStream owns the stream; its close() also releases
    // the DFS client once the transfer finishes.
    ctx.writeAndFlush(new ChunkedStream(data) {
      @Override
      public void close() throws Exception {
        super.close();
        dfsclient.close();
      }
    }).addListener(ChannelFutureListener.CLOSE);
  } catch (IOException | RuntimeException e) {
    // Handoff never happened: close what we opened, keeping the original
    // failure primary and attaching any close failures as suppressed.
    if (in != null) {
      try {
        in.close();
      } catch (IOException closeFailure) {
        e.addSuppressed(closeFailure);
      }
    }
    try {
      dfsclient.close();
    } catch (IOException closeFailure) {
      e.addSuppressed(closeFailure);
    }
    throw e;
  }
}
Aggregations