Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.DefaultHttpResponse in project netty by netty.
From the class Http2ServerDowngraderTest, method testUpgradeHeaders().
@Test
public void testUpgradeHeaders() throws Exception {
    EmbeddedChannel ch = new EmbeddedChannel(new Http2ServerDowngrader());
    HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    assertTrue(ch.writeOutbound(response));

    Http2HeadersFrame headersFrame = ch.readOutbound();
    assertThat(headersFrame.headers().status().toString(), is("200"));
    assertFalse(headersFrame.isEndStream());

    assertThat(ch.readOutbound(), is(nullValue()));
    assertFalse(ch.finish());
}
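The same EmbeddedChannel technique works against any outbound codec. As a point of comparison, here is a minimal, self-contained sketch (using the plain io.netty package names rather than the Flink-shaded ones, and plain assertions instead of Hamcrest) that pushes a header-only DefaultHttpResponse through Netty's stock HttpResponseEncoder and inspects the encoded bytes. It is an illustrative sketch, not part of the Netty test suite.

import io.netty.buffer.ByteBuf;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.util.CharsetUtil;

public class DefaultHttpResponseEncodeSketch {
    public static void main(String[] args) {
        // EmbeddedChannel drives the handler without any real network I/O.
        EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseEncoder());

        // A header-only response: DefaultHttpResponse carries no body,
        // unlike DefaultFullHttpResponse.
        HttpResponse response =
            new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);

        if (!ch.writeOutbound(response)) {
            throw new AssertionError("encoder produced no outbound data");
        }

        // The encoder emits the status line and headers as a ByteBuf.
        ByteBuf encoded = ch.readOutbound();
        String text = encoded.toString(CharsetUtil.US_ASCII);
        if (!text.startsWith("HTTP/1.1 200 OK")) {
            throw new AssertionError("unexpected status line: " + text);
        }
        encoded.release();
        ch.finish();
    }
}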
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.DefaultHttpResponse in project hadoop by apache.
From the class RestCsrfPreventionFilterHandler, method exceptionCaught().
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    LOG.error("Exception in " + this.getClass().getSimpleName(), cause);
    sendResponseAndClose(ctx, new DefaultHttpResponse(HTTP_1_1, INTERNAL_SERVER_ERROR));
}
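The sendResponseAndClose helper itself is not part of this snippet. Assuming its only job is to flush the error response and terminate the connection, a minimal sketch in the same style could look like the following (hypothetical, not the actual Hadoop implementation):

import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;

final class ResponseUtil {
    private ResponseUtil() {
    }

    // Hypothetical helper: mark the connection as non-reusable, flush the
    // header-only response, and close the channel once the write completes.
    static void sendResponseAndClose(ChannelHandlerContext ctx, DefaultHttpResponse resp) {
        resp.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
        ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
    }
}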
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.DefaultHttpResponse in project hadoop by apache.
From the class HdfsWriter, method exceptionCaught().
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    releaseDfsResources();
    DefaultHttpResponse resp = ExceptionHandler.exceptionCaught(cause);
    resp.headers().set(CONNECTION, CLOSE);
    ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
    if (LOG != null && LOG.isDebugEnabled()) {
        LOG.debug("Exception in channel handler ", cause);
    }
}
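ExceptionHandler.exceptionCaught(cause) is not reproduced above; it maps the thrown exception to a ready-made DefaultHttpResponse. As an illustration only (hypothetical, the actual Hadoop ExceptionHandler is not shown here and does more than this), a mapper of that shape could be sketched as follows:

import java.io.FileNotFoundException;
import java.nio.file.AccessDeniedException;

import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;

final class SimpleExceptionMapper {
    private SimpleExceptionMapper() {
    }

    // Hypothetical mapper: pick an HTTP status from the exception type and
    // wrap it in a header-only response. The caller decides how to write it.
    static DefaultHttpResponse exceptionCaught(Throwable cause) {
        HttpResponseStatus status;
        if (cause instanceof FileNotFoundException) {
            status = HttpResponseStatus.NOT_FOUND;
        } else if (cause instanceof AccessDeniedException) {
            status = HttpResponseStatus.FORBIDDEN;
        } else if (cause instanceof IllegalArgumentException) {
            status = HttpResponseStatus.BAD_REQUEST;
        } else {
            status = HttpResponseStatus.INTERNAL_SERVER_ERROR;
        }
        return new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
    }
}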
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.DefaultHttpResponse in project hadoop by apache.
From the class WebHdfsHandler, method onCreate().
private void onCreate(ChannelHandlerContext ctx) throws IOException, URISyntaxException {
    writeContinueHeader(ctx);

    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    final short replication = params.replication();
    final long blockSize = params.blockSize();
    final FsPermission unmaskedPermission = params.unmaskedPermission();
    final FsPermission permission = unmaskedPermission == null
        ? params.permission()
        : FsCreateModes.create(params.permission(), unmaskedPermission);
    final boolean createParent = params.createParent();

    EnumSet<CreateFlag> flags = params.createFlag();
    if (flags.equals(EMPTY_CREATE_FLAG)) {
        flags = params.overwrite()
            ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
            : EnumSet.of(CreateFlag.CREATE);
    } else if (params.overwrite()) {
        flags.add(CreateFlag.OVERWRITE);
    }

    final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
    OutputStream out = dfsClient.createWrappedOutputStream(
        dfsClient.create(path, permission, flags, createParent, replication, blockSize, null, bufferSize, null),
        null);

    resp = new DefaultHttpResponse(HTTP_1_1, CREATED);
    final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
    resp.headers().set(LOCATION, uri.toString());
    resp.headers().set(CONTENT_LENGTH, 0);
    resp.headers().set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");

    ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(), new HdfsWriter(dfsClient, out, resp));
}
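writeContinueHeader(ctx) is called before any file data is accepted but is not reproduced above. Assuming its only job is to send the interim "100 Continue" response that a client waits for before streaming the request body, a sketch of such a helper (hypothetical, not the actual Hadoop code) could be:

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;

final class ContinueSupport {
    private ContinueSupport() {
    }

    // Hypothetical helper: emit an interim "100 Continue" so the client
    // starts sending the body of its PUT/POST request.
    static void writeContinueHeader(ChannelHandlerContext ctx) {
        HttpResponse cont = new DefaultFullHttpResponse(
            HttpVersion.HTTP_1_1, HttpResponseStatus.CONTINUE, Unpooled.EMPTY_BUFFER);
        ctx.writeAndFlush(cont);
    }
}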
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.DefaultHttpResponse in project netty-socketio by mrniko.
From the class PollingTransport, method sendError().
private void sendError(ChannelHandlerContext ctx) {
    HttpResponse res = new DefaultHttpResponse(HTTP_1_1, HttpResponseStatus.INTERNAL_SERVER_ERROR);
    ctx.channel().writeAndFlush(res).addListener(ChannelFutureListener.CLOSE);
}
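For context, writing a DefaultHttpResponse like this only works when an HTTP encoder sits earlier in the pipeline to serialize it. A minimal, hedged wiring sketch built from standard Netty classes (not taken from netty-socketio) might look like:

import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http.HttpVersion;

// Hypothetical wiring: HttpServerCodec encodes the HttpResponse objects that
// the error handler writes, mirroring the sendError pattern above.
class HttpErrorPipelineSketch extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        ch.pipeline().addLast(new HttpServerCodec());           // HTTP encoder/decoder
        ch.pipeline().addLast(new HttpObjectAggregator(65536));  // assembles FullHttpRequest
        ch.pipeline().addLast(new SimpleChannelInboundHandler<FullHttpRequest>() {
            @Override
            protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest msg) {
                // Application logic would go here; a failure falls through to
                // exceptionCaught below.
                throw new IllegalStateException("demo failure");
            }

            @Override
            public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
                // Same pattern as PollingTransport.sendError: header-only 500,
                // then close once the write completes.
                HttpResponse res = new DefaultHttpResponse(
                    HttpVersion.HTTP_1_1, HttpResponseStatus.INTERNAL_SERVER_ERROR);
                ctx.writeAndFlush(res).addListener(ChannelFutureListener.CLOSE);
            }
        });
    }
}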