Use of io.netty.channel.Channel in project elasticsearch by elastic.
In class SimpleNetty4TransportTests, method nettyFromThreadPool:
public static MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version,
        ClusterSettings clusterSettings, boolean doHandshake) {
    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
    Transport transport = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
            BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) {

        @Override
        protected Version executeHandshake(DiscoveryNode node, Channel channel, TimeValue timeout)
                throws IOException, InterruptedException {
            if (doHandshake) {
                return super.executeHandshake(node, channel, timeout);
            } else {
                return version.minimumCompatibilityVersion();
            }
        }

        @Override
        protected Version getCurrentVersion() {
            return version;
        }
    };
    MockTransportService mockTransportService = MockTransportService.createNewService(Settings.EMPTY, transport, version,
            threadPool, clusterSettings);
    mockTransportService.start();
    return mockTransportService;
}
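The test swaps the real version handshake for a fixed answer by overriding a protected hook in an anonymous subclass, so no wire round-trip happens. A minimal sketch of that stubbing pattern outside Elasticsearch (all class and method names here are hypothetical, for illustration only):

// Hypothetical transport with an overridable handshake hook.
class VersionedTransport {
    protected int executeHandshake() {
        // Production path: negotiate the version over the wire.
        return 5;
    }
}

class HandshakeStub {
    // An anonymous subclass lets a test skip the network round-trip while
    // keeping the rest of the transport untouched.
    static VersionedTransport stubbed(int fixedVersion) {
        return new VersionedTransport() {
            @Override
            protected int executeHandshake() {
                return fixedVersion; // answer immediately, no I/O
            }
        };
    }
}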
Use of io.netty.channel.Channel in project vert.x by eclipse.
In class HttpServerImpl, method listen:
public synchronized HttpServer listen(int port, String host, Handler<AsyncResult<HttpServer>> listenHandler) {
    if (requestStream.handler() == null && wsStream.handler() == null) {
        throw new IllegalStateException("Set request or websocket handler first");
    }
    if (listening) {
        throw new IllegalStateException("Already listening");
    }
    listenContext = vertx.getOrCreateContext();
    listening = true;
    serverOrigin = (options.isSsl() ? "https" : "http") + "://" + host + ":" + port;
    List<HttpVersion> applicationProtocols = options.getAlpnVersions();
    if (listenContext.isWorkerContext()) {
        applicationProtocols = applicationProtocols.stream().filter(v -> v != HttpVersion.HTTP_2).collect(Collectors.toList());
    }
    sslHelper.setApplicationProtocols(applicationProtocols);
    synchronized (vertx.sharedHttpServers()) {
        // Will be updated on bind for a wildcard port
        this.actualPort = port;
        id = new ServerID(port, host);
        HttpServerImpl shared = vertx.sharedHttpServers().get(id);
        if (shared == null || port == 0) {
            serverChannelGroup = new DefaultChannelGroup("vertx-acceptor-channels", GlobalEventExecutor.INSTANCE);
            ServerBootstrap bootstrap = new ServerBootstrap();
            bootstrap.group(vertx.getAcceptorEventLoopGroup(), availableWorkers);
            bootstrap.channel(NioServerSocketChannel.class);
            applyConnectionOptions(bootstrap);
            sslHelper.validate(vertx);
            bootstrap.childHandler(new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    if (requestStream.isPaused() || wsStream.isPaused()) {
                        ch.close();
                        return;
                    }
                    ChannelPipeline pipeline = ch.pipeline();
                    if (sslHelper.isSSL()) {
                        pipeline.addLast("ssl", sslHelper.createSslHandler(vertx));
                        if (options.isUseAlpn()) {
                            pipeline.addLast("alpn", new ApplicationProtocolNegotiationHandler("http/1.1") {
                                @Override
                                protected void configurePipeline(ChannelHandlerContext ctx, String protocol) throws Exception {
                                    if (protocol.equals("http/1.1")) {
                                        configureHttp1(pipeline);
                                    } else {
                                        handleHttp2(ch);
                                    }
                                }
                            });
                        } else {
                            configureHttp1(pipeline);
                        }
                    } else {
                        if (DISABLE_H2C) {
                            configureHttp1(pipeline);
                        } else {
                            pipeline.addLast(new Http1xOrHttp2Handler());
                        }
                    }
                }
            });
            addHandlers(this, listenContext);
            try {
                bindFuture = AsyncResolveConnectHelper.doBind(vertx, port, host, bootstrap);
                bindFuture.addListener(res -> {
                    if (res.failed()) {
                        vertx.sharedHttpServers().remove(id);
                    } else {
                        Channel serverChannel = res.result();
                        HttpServerImpl.this.actualPort = ((InetSocketAddress) serverChannel.localAddress()).getPort();
                        serverChannelGroup.add(serverChannel);
                        metrics = vertx.metricsSPI().createMetrics(this, new SocketAddressImpl(port, host), options);
                    }
                });
            } catch (final Throwable t) {
                // Make sure we send the exception back through the handler (if any)
                if (listenHandler != null) {
                    vertx.runOnContext(v -> listenHandler.handle(Future.failedFuture(t)));
                } else {
                    // No handler - log so user can see failure
                    log.error(t);
                }
                listening = false;
                return this;
            }
            vertx.sharedHttpServers().put(id, this);
            actualServer = this;
        } else {
            // Server already exists with that host/port - we will use that
            actualServer = shared;
            this.actualPort = shared.actualPort;
            addHandlers(actualServer, listenContext);
            metrics = vertx.metricsSPI().createMetrics(this, new SocketAddressImpl(port, host), options);
        }
        actualServer.bindFuture.addListener(future -> {
            if (listenHandler != null) {
                final AsyncResult<HttpServer> res;
                if (future.succeeded()) {
                    res = Future.succeededFuture(HttpServerImpl.this);
                } else {
                    res = Future.failedFuture(future.cause());
                    listening = false;
                }
                listenContext.runOnContext((v) -> listenHandler.handle(res));
            } else if (future.failed()) {
                listening = false;
                log.error(future.cause());
            }
        });
    }
    return this;
}
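The interesting Channel usage here is the ALPN switch: once the SslHandler finishes the TLS handshake, Netty's ApplicationProtocolNegotiationHandler reports which protocol the peer agreed on, and the server installs the matching codecs on that channel's pipeline. A standalone sketch of that pattern using only stock Netty classes (the codec choices in the comments are placeholders, not Vert.x internals):

import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.ssl.ApplicationProtocolNames;
import io.netty.handler.ssl.ApplicationProtocolNegotiationHandler;

final class AlpnConfigurer {
    // Adds a handler that waits for the TLS handshake, reads the ALPN result,
    // and installs the matching HTTP codecs. "http/1.1" is the fallback used
    // when the peer did not negotiate a protocol at all.
    static void addAlpnSwitch(Channel ch) {
        ch.pipeline().addLast("alpn", new ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) {
            @Override
            protected void configurePipeline(ChannelHandlerContext ctx, String protocol) {
                if (ApplicationProtocolNames.HTTP_2.equals(protocol)) {
                    // install HTTP/2 handlers here, e.g. an Http2FrameCodec
                } else {
                    // install HTTP/1.1 handlers here, e.g. an HttpServerCodec
                }
            }
        });
    }
}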
Use of io.netty.channel.Channel in project hbase by apache.
In class FanOutOneBlockAsyncDFSOutputHelper, method connectToDataNodes:
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client, String clientName,
        LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage,
        DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(BaseHeaderProto.newBuilder()
            .setBlock(PB_HELPER.convert(blockCopy))
            .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
        .setClientName(clientName)
        .build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
        .setHeader(header)
        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
        .setPipelineSize(1)
        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
        .setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGS)
        .setRequestedChecksum(checksumProto)
        .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        Enum<?> storageType = storageTypes[i];
        Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop)
            .channel(NioSocketChannel.class)
            .option(CONNECT_TIMEOUT_MILLIS, timeoutMs)
            .handler(new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    // we need to get the remote address of the channel so we can only move on after
                    // channel connected. Leave an empty implementation here because netty does not allow
                    // a null handler.
                }
            })
            .connect(NetUtils.createSocketAddr(dnAddr))
            .addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (future.isSuccess()) {
                        initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                            timeoutMs, client, locatedBlock.getBlockToken(), promise);
                    } else {
                        promise.tryFailure(future.cause());
                    }
                }
            });
    }
    return futureList;
}
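The connect loop adapts Netty's per-connection ChannelFuture into a Promise<Channel> per datanode, so the caller gets one future it can complete or fail later during pipeline setup. A minimal self-contained sketch of that adapter pattern (class and method names are hypothetical):

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoop;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Promise;
import java.net.SocketAddress;

final class ConnectHelper {
    // Connects to a peer and exposes the result as a Promise<Channel>, so the
    // caller can collect one future per remote peer, as the HBase code does.
    static Promise<Channel> connect(EventLoop eventLoop, SocketAddress addr, int timeoutMs) {
        Promise<Channel> promise = eventLoop.newPromise();
        new Bootstrap().group(eventLoop)
            .channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeoutMs)
            .handler(new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) {
                    // Intentionally empty: Netty requires a non-null handler,
                    // and the real handlers are added after the connect succeeds.
                }
            })
            .connect(addr)
            .addListener((ChannelFutureListener) f -> {
                if (f.isSuccess()) {
                    promise.trySuccess(f.channel());
                } else {
                    promise.tryFailure(f.cause());
                }
            });
        return promise;
    }
}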
Use of io.netty.channel.Channel in project hbase by apache.
In class FanOutOneBlockAsyncDFSOutput, method flushBuffer:
private Promise<Void> flushBuffer(ByteBuf dataBuf, long nextPacketOffsetInBlock, boolean syncBlock) {
    int dataLen = dataBuf.readableBytes();
    int chunkLen = summer.getBytesPerChecksum();
    int trailingPartialChunkLen = dataLen % chunkLen;
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
    int checksumLen = numChecks * summer.getChecksumSize();
    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
    summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
    checksumBuf.writerIndex(checksumLen);
    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, nextPacketSeqno,
        false, dataLen, syncBlock);
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    long ackedLength = nextPacketOffsetInBlock + dataLen;
    Promise<Void> promise = eventLoop.<Void>newPromise().addListener(future -> {
        if (future.isSuccess()) {
            locatedBlock.getBlock().setNumBytes(ackedLength);
        }
    });
    waitingAckQueue.addLast(new Callback(promise, ackedLength, datanodeList));
    for (Channel ch : datanodeList) {
        ch.write(headerBuf.duplicate().retain());
        ch.write(checksumBuf.duplicate().retain());
        ch.writeAndFlush(dataBuf.duplicate().retain());
    }
    checksumBuf.release();
    headerBuf.release();
    dataBuf.release();
    nextPacketSeqno++;
    return promise;
}
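flushBuffer writes the same buffers to every datanode channel via duplicate().retain(): duplicate() shares the underlying memory but gives each write its own reader/writer indices, and retain() adds one reference per outstanding write because Netty releases a ByteBuf after writing it to the socket. A minimal sketch of that fan-out refcounting pattern (names hypothetical):

import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import java.util.List;

final class FanOutWriter {
    // Writes one payload to several channels without copying the bytes.
    static void fanOut(List<Channel> channels, ByteBuf payload) {
        for (Channel ch : channels) {
            // Each channel gets a retained duplicate; Netty releases it once
            // the write completes, decrementing the shared reference count.
            ch.writeAndFlush(payload.duplicate().retain());
        }
        // Drop the caller's own reference; the memory is freed only after the
        // last channel finishes writing its duplicate.
        payload.release();
    }
}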
Use of io.netty.channel.Channel in project jersey by jersey.
In class NettyConnector, method apply:
@Override
public Future<?> apply(final ClientRequest jerseyRequest, final AsyncConnectorCallback jerseyCallback) {
    final CompletableFuture<Object> settableFuture = new CompletableFuture<>();
    final URI requestUri = jerseyRequest.getUri();
    String host = requestUri.getHost();
    int port = requestUri.getPort() != -1 ? requestUri.getPort() : "https".equals(requestUri.getScheme()) ? 443 : 80;
    try {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioSocketChannel.class).handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                ChannelPipeline p = ch.pipeline();
                // Enable HTTPS if necessary.
                if ("https".equals(requestUri.getScheme())) {
                    // making client authentication optional for now; it could be extracted to configurable property
                    JdkSslContext jdkSslContext = new JdkSslContext(client.getSslContext(), true, ClientAuth.NONE);
                    p.addLast(jdkSslContext.newHandler(ch.alloc()));
                }
                // http proxy
                Configuration config = jerseyRequest.getConfiguration();
                final Object proxyUri = config.getProperties().get(ClientProperties.PROXY_URI);
                if (proxyUri != null) {
                    final URI u = getProxyUri(proxyUri);
                    final String userName = ClientProperties.getValue(config.getProperties(), ClientProperties.PROXY_USERNAME, String.class);
                    final String password = ClientProperties.getValue(config.getProperties(), ClientProperties.PROXY_PASSWORD, String.class);
                    p.addLast(new HttpProxyHandler(new InetSocketAddress(u.getHost(), u.getPort() == -1 ? 8080 : u.getPort()), userName, password));
                }
                p.addLast(new HttpClientCodec());
                p.addLast(new ChunkedWriteHandler());
                p.addLast(new HttpContentDecompressor());
                p.addLast(new JerseyClientHandler(NettyConnector.this, jerseyRequest, jerseyCallback, settableFuture));
            }
        });
        // connect timeout
        Integer connectTimeout = ClientProperties.getValue(jerseyRequest.getConfiguration().getProperties(), ClientProperties.CONNECT_TIMEOUT, 0);
        if (connectTimeout > 0) {
            b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeout);
        }
        // Make the connection attempt.
        final Channel ch = b.connect(host, port).sync().channel();
        // guard against prematurely closed channel
        final GenericFutureListener<io.netty.util.concurrent.Future<? super Void>> closeListener = new GenericFutureListener<io.netty.util.concurrent.Future<? super Void>>() {
            @Override
            public void operationComplete(io.netty.util.concurrent.Future<? super Void> future) throws Exception {
                if (!settableFuture.isDone()) {
                    settableFuture.completeExceptionally(new IOException("Channel closed."));
                }
            }
        };
        ch.closeFuture().addListener(closeListener);
        HttpRequest nettyRequest;
        if (jerseyRequest.hasEntity()) {
            nettyRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.valueOf(jerseyRequest.getMethod()), requestUri.getRawPath());
        } else {
            nettyRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.valueOf(jerseyRequest.getMethod()), requestUri.getRawPath());
        }
        // headers
        for (final Map.Entry<String, List<String>> e : jerseyRequest.getStringHeaders().entrySet()) {
            nettyRequest.headers().add(e.getKey(), e.getValue());
        }
        // host header - http 1.1
        nettyRequest.headers().add(HttpHeaderNames.HOST, jerseyRequest.getUri().getHost());
        if (jerseyRequest.hasEntity()) {
            if (jerseyRequest.getLengthLong() == -1) {
                HttpUtil.setTransferEncodingChunked(nettyRequest, true);
            } else {
                nettyRequest.headers().add(HttpHeaderNames.CONTENT_LENGTH, jerseyRequest.getLengthLong());
            }
        }
        if (jerseyRequest.hasEntity()) {
            // Send the HTTP request.
            ch.writeAndFlush(nettyRequest);
            final JerseyChunkedInput jerseyChunkedInput = new JerseyChunkedInput(ch);
            jerseyRequest.setStreamProvider(new OutboundMessageContext.StreamProvider() {
                @Override
                public OutputStream getOutputStream(int contentLength) throws IOException {
                    return jerseyChunkedInput;
                }
            });
            if (HttpUtil.isTransferEncodingChunked(nettyRequest)) {
                ch.write(new HttpChunkedInput(jerseyChunkedInput));
            } else {
                ch.write(jerseyChunkedInput);
            }
            executorService.execute(new Runnable() {
                @Override
                public void run() {
                    // close listener is not needed any more.
                    ch.closeFuture().removeListener(closeListener);
                    try {
                        jerseyRequest.writeEntity();
                    } catch (IOException e) {
                        jerseyCallback.failure(e);
                        settableFuture.completeExceptionally(e);
                    }
                }
            });
            ch.flush();
        } else {
            // close listener is not needed any more.
            ch.closeFuture().removeListener(closeListener);
            // Send the HTTP request.
            ch.writeAndFlush(nettyRequest);
        }
    } catch (InterruptedException e) {
        settableFuture.completeExceptionally(e);
        return settableFuture;
    }
    return settableFuture;
}
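The close-listener guard used above is a reusable pattern: fail the pending result future if the channel dies before a response arrives, then detach the listener once the outcome is settled through the normal path. A minimal sketch of just that guard (class and method names are hypothetical):

import io.netty.channel.Channel;
import io.netty.util.concurrent.GenericFutureListener;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;

final class CloseGuard {
    // Fails the response future if the channel closes before a response
    // arrives. Returns the listener so the caller can remove it via
    // ch.closeFuture().removeListener(...) once the outcome is decided.
    static GenericFutureListener<io.netty.util.concurrent.Future<? super Void>> guard(
            Channel ch, CompletableFuture<?> responseFuture) {
        GenericFutureListener<io.netty.util.concurrent.Future<? super Void>> listener = future -> {
            if (!responseFuture.isDone()) {
                responseFuture.completeExceptionally(new IOException("Channel closed."));
            }
        };
        ch.closeFuture().addListener(listener);
        return listener;
    }
}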