
Example 1 with Bootstrap

Use of io.netty.bootstrap.Bootstrap in project flink by apache.

Class NettyConnectionManagerTest, method testManualConfiguration.

/**
	 * Tests that the number of arenas and threads can be configured manually.
	 */
@Test
public void testManualConfiguration() throws Exception {
    // Expected configuration values
    int numberOfArenas = 1;
    int numberOfClientThreads = 3;
    int numberOfServerThreads = 4;
    Configuration flinkConfig = new Configuration();
    flinkConfig.setInteger(NettyConfig.NUM_ARENAS, numberOfArenas);
    flinkConfig.setInteger(NettyConfig.NUM_THREADS_CLIENT, numberOfClientThreads);
    flinkConfig.setInteger(NettyConfig.NUM_THREADS_SERVER, numberOfServerThreads);
    NettyConfig config = new NettyConfig(InetAddress.getLocalHost(), NetUtils.getAvailablePort(), 1024, 1337, flinkConfig);
    NettyConnectionManager connectionManager = new NettyConnectionManager(config);
    connectionManager.start(mock(ResultPartitionProvider.class), mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));
    assertEquals(numberOfArenas, connectionManager.getBufferPool().getNumberOfArenas());
    {
        // Client event loop group
        Bootstrap bootstrap = connectionManager.getClient().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        // Reach into MultithreadEventExecutorGroup's private "children" array
        // (two superclass hops up from the concrete group) to count the event loops.
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfClientThreads, eventExecutors.length);
    }
    {
        // Server event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfServerThreads, eventExecutors.length);
    }
    {
        // Server child event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.childGroup();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfServerThreads, eventExecutors.length);
    }
}
Also used: Field(java.lang.reflect.Field), EventLoopGroup(io.netty.channel.EventLoopGroup), Configuration(org.apache.flink.configuration.Configuration), Bootstrap(io.netty.bootstrap.Bootstrap), ServerBootstrap(io.netty.bootstrap.ServerBootstrap), ResultPartitionProvider(org.apache.flink.runtime.io.network.partition.ResultPartitionProvider), TaskEventDispatcher(org.apache.flink.runtime.io.network.TaskEventDispatcher), NetworkBufferPool(org.apache.flink.runtime.io.network.buffer.NetworkBufferPool), Test(org.junit.Test)
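
For context, the thread counts these assertions check are just the sizes of the event loop groups that the connection manager builds. A minimal sketch of the underlying pattern, using hypothetical names rather than Flink's actual NettyClient internals:

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;

// A NioEventLoopGroup constructed with an explicit thread count holds exactly
// that many event loops; they back the private "children" array the test reads.
NioEventLoopGroup clientGroup = new NioEventLoopGroup(3);
Bootstrap bootstrap = new Bootstrap()
    .group(clientGroup)
    .channel(NioSocketChannel.class);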

Example 2 with Bootstrap

Use of io.netty.bootstrap.Bootstrap in project flink by apache.

Class NettyConnectionManagerTest, method testMatchingNumberOfArenasAndThreadsAsDefault.

/**
	 * Tests that the number of arenas and the number of client and server
	 * threads all default to the same value, namely the number of configured
	 * task slots.
	 */
@Test
public void testMatchingNumberOfArenasAndThreadsAsDefault() throws Exception {
    // Expected number of arenas and threads
    int numberOfSlots = 2;
    NettyConfig config = new NettyConfig(InetAddress.getLocalHost(), NetUtils.getAvailablePort(), 1024, numberOfSlots, new Configuration());
    NettyConnectionManager connectionManager = new NettyConnectionManager(config);
    connectionManager.start(mock(ResultPartitionProvider.class), mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));
    assertEquals(numberOfSlots, connectionManager.getBufferPool().getNumberOfArenas());
    {
        // Client event loop group
        Bootstrap bootstrap = connectionManager.getClient().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfSlots, eventExecutors.length);
    }
    {
        // Server event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfSlots, eventExecutors.length);
    }
    {
        // Server child event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.childGroup();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfSlots, eventExecutors.length);
    }
}
Also used: Field(java.lang.reflect.Field), EventLoopGroup(io.netty.channel.EventLoopGroup), Configuration(org.apache.flink.configuration.Configuration), Bootstrap(io.netty.bootstrap.Bootstrap), ServerBootstrap(io.netty.bootstrap.ServerBootstrap), ResultPartitionProvider(org.apache.flink.runtime.io.network.partition.ResultPartitionProvider), TaskEventDispatcher(org.apache.flink.runtime.io.network.TaskEventDispatcher), NetworkBufferPool(org.apache.flink.runtime.io.network.buffer.NetworkBufferPool), Test(org.junit.Test)
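
Both tests repeat the same three-line reflection block six times between them. A small helper along these lines (hypothetical, not part of Flink's test code) would factor it out:

// Counts the event loops in a group by reading the private "children" field of
// Netty's MultithreadEventExecutorGroup, two superclass hops above the concrete
// group class.
private static int numberOfEventLoops(EventLoopGroup group) throws Exception {
    Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
    f.setAccessible(true);
    return ((Object[]) f.get(group)).length;
}

Each block would then collapse to a single assertEquals(expected, numberOfEventLoops(group)).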

Example 3 with Bootstrap

Use of io.netty.bootstrap.Bootstrap in project flink by apache.

Class KvStateServerTest, method testSimpleRequest.

/**
	 * Tests a simple successful query via a SocketChannel.
	 */
@Test
public void testSimpleRequest() throws Exception {
    KvStateServer server = null;
    Bootstrap bootstrap = null;
    try {
        KvStateRegistry registry = new KvStateRegistry();
        KvStateRequestStats stats = new AtomicKvStateRequestStats();
        server = new KvStateServer(InetAddress.getLocalHost(), 0, 1, 1, registry, stats);
        server.start();
        KvStateServerAddress serverAddress = server.getAddress();
        int numKeyGroups = 1;
        AbstractStateBackend abstractBackend = new MemoryStateBackend();
        DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
        dummyEnv.setKvStateRegistry(registry);
        AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
            dummyEnv,
            new JobID(),
            "test_op",
            IntSerializer.INSTANCE,
            numKeyGroups,
            new KeyGroupRange(0, 0),
            registry.createTaskRegistry(new JobID(), new JobVertexID()));
        final KvStateServerHandlerTest.TestRegistryListener registryListener = new KvStateServerHandlerTest.TestRegistryListener();
        registry.registerListener(registryListener);
        ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
        desc.setQueryable("vanilla");
        ValueState<Integer> state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
        // Update KvState
        int expectedValue = 712828289;
        int key = 99812822;
        backend.setCurrentKey(key);
        state.update(expectedValue);
        // Request
        byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(key, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);
        // Connect to the server
        final BlockingQueue<ByteBuf> responses = new LinkedBlockingQueue<>();
        bootstrap = createBootstrap(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4), new ChannelInboundHandlerAdapter() {

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                responses.add((ByteBuf) msg);
            }
        });
        Channel channel = bootstrap.connect(serverAddress.getHost(), serverAddress.getPort()).sync().channel();
        long requestId = Integer.MAX_VALUE + 182828L;
        assertEquals("vanilla", registryListener.registrationName);
        ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(channel.alloc(), requestId, registryListener.kvStateId, serializedKeyAndNamespace);
        channel.writeAndFlush(request);
        ByteBuf buf = responses.poll(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
        assertEquals(KvStateRequestType.REQUEST_RESULT, KvStateRequestSerializer.deserializeHeader(buf));
        KvStateRequestResult response = KvStateRequestSerializer.deserializeKvStateRequestResult(buf);
        assertEquals(requestId, response.getRequestId());
        int actualValue = KvStateRequestSerializer.deserializeValue(response.getSerializedResult(), IntSerializer.INSTANCE);
        assertEquals(expectedValue, actualValue);
    } finally {
        if (server != null) {
            server.shutDown();
        }
        if (bootstrap != null) {
            EventLoopGroup group = bootstrap.group();
            if (group != null) {
                group.shutdownGracefully();
            }
        }
    }
}
Also used: KvStateRegistry(org.apache.flink.runtime.query.KvStateRegistry), KvStateRequestResult(org.apache.flink.runtime.query.netty.message.KvStateRequestResult), MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend), JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID), KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange), KvStateServerAddress(org.apache.flink.runtime.query.KvStateServerAddress), ChannelHandlerContext(io.netty.channel.ChannelHandlerContext), LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue), ByteBuf(io.netty.buffer.ByteBuf), ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor), Bootstrap(io.netty.bootstrap.Bootstrap), LengthFieldBasedFrameDecoder(io.netty.handler.codec.LengthFieldBasedFrameDecoder), AbstractStateBackend(org.apache.flink.runtime.state.AbstractStateBackend), NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel), SocketChannel(io.netty.channel.socket.SocketChannel), Channel(io.netty.channel.Channel), DummyEnvironment(org.apache.flink.runtime.operators.testutils.DummyEnvironment), EventLoopGroup(io.netty.channel.EventLoopGroup), NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup), JobID(org.apache.flink.api.common.JobID), ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter), Test(org.junit.Test)
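
The createBootstrap(...) utility is not shown in the excerpt. A plausible sketch, assuming it simply wires the given handlers into a fresh client pipeline (Flink's actual test helper may differ):

private static Bootstrap createBootstrap(final ChannelHandler... handlers) {
    return new Bootstrap()
        .group(new NioEventLoopGroup())
        .channel(NioSocketChannel.class)
        .handler(new ChannelInitializer<SocketChannel>() {

            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                // Install the frame decoder and the response collector in order.
                ch.pipeline().addLast(handlers);
            }
        });
}

A helper like this would also explain why the finally block shuts down bootstrap.group() rather than an externally owned event loop group.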

Example 4 with Bootstrap

Use of io.netty.bootstrap.Bootstrap in project hbase by apache.

Class FanOutOneBlockAsyncDFSOutputHelper, method connectToDataNodes.

private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client, String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage, DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(BaseHeaderProto.newBuilder()
            .setBlock(PB_HELPER.convert(blockCopy))
            .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
        .setClientName(clientName)
        .build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
        .setHeader(header)
        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
        .setPipelineSize(1)
        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
        .setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGS)
        .setRequestedChecksum(checksumProto)
        .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        Enum<?> storageType = storageTypes[i];
        Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop)
            .channel(NioSocketChannel.class)
            .option(CONNECT_TIMEOUT_MILLIS, timeoutMs)
            .handler(new ChannelInitializer<Channel>() {

            @Override
            protected void initChannel(Channel ch) throws Exception {
                // We need the remote address of the channel, so we can only move on
                // once the channel has connected. The implementation is intentionally
                // empty because Netty does not allow a null handler.
            }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
    }
    return futureList;
}
Also used: ChannelFuture(io.netty.channel.ChannelFuture), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel), Channel(io.netty.channel.Channel), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList(java.util.ArrayList), ChecksumProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto), OpWriteBlockProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto), ChannelFutureListener(io.netty.channel.ChannelFutureListener), UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException), LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException), InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException), InvocationTargetException(java.lang.reflect.InvocationTargetException), IOException(java.io.IOException), RemoteException(org.apache.hadoop.ipc.RemoteException), Future(io.netty.util.concurrent.Future), Bootstrap(io.netty.bootstrap.Bootstrap), ClientOperationHeaderProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)
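
A caller of connectToDataNodes has to wait for every datanode connection before streaming the block. A minimal, hedged sketch of that consumption (variable names hypothetical):

List<Channel> channels = new ArrayList<>(futureList.size());
for (Future<Channel> future : futureList) {
    // sync() rethrows any failure recorded via promise.tryFailure(...) above,
    // so a single unreachable datanode aborts the whole pipeline setup.
    channels.add(future.sync().getNow());
}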

Example 5 with Bootstrap

Use of io.netty.bootstrap.Bootstrap in project hive by apache.

Class Rpc, method createClient.

/**
   * Creates an RPC client for a server running on the given remote host and port.
   *
   * @param config RPC configuration data.
   * @param eloop Event loop for managing the connection.
   * @param host Host name or IP address to connect to.
   * @param port Port where server is listening.
   * @param clientId The client ID that identifies the connection.
   * @param secret Secret for authenticating the client with the server.
   * @param dispatcher Dispatcher used to handle RPC calls.
   * @return A future that can be used to monitor the creation of the RPC object.
   */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host, int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();
    final ChannelFuture cf = new Bootstrap()
        .group(eloop)
        .handler(new ChannelInboundHandlerAdapter() { })
        .channel(NioSocketChannel.class)
        .option(ChannelOption.SO_KEEPALIVE, true)
        .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs)
        .connect(host, port);
    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();
    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {

        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, connectTimeoutMs, TimeUnit.MILLISECONDS);
    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture, secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });
    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {

        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });
    return promise;
}
Also used: ChannelFuture(io.netty.channel.ChannelFuture), NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel), SocketChannel(io.netty.channel.socket.SocketChannel), AtomicReference(java.util.concurrent.atomic.AtomicReference), ChannelFutureListener(io.netty.channel.ChannelFutureListener), TimeoutException(java.util.concurrent.TimeoutException), SaslException(javax.security.sasl.SaslException), IOException(java.io.IOException), Promise(io.netty.util.concurrent.Promise), Bootstrap(io.netty.bootstrap.Bootstrap), ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter)
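
The schedule-a-timeout, connect, then settle-the-promise structure generalizes beyond Hive's Rpc class. A standalone, hedged sketch of the same pattern (names hypothetical):

final Promise<Channel> promise = eloop.next().newPromise();
final ScheduledFuture<?> timeout = eloop.schedule(new Runnable() {

    @Override
    public void run() {
        promise.tryFailure(new TimeoutException("Timed out connecting to server."));
    }
}, connectTimeoutMs, TimeUnit.MILLISECONDS);
bootstrap.connect(host, port).addListener(new ChannelFutureListener() {

    @Override
    public void operationComplete(ChannelFuture f) {
        // Cancel the pending timeout either way, then settle the promise exactly once.
        timeout.cancel(false);
        if (f.isSuccess()) {
            promise.trySuccess(f.channel());
        } else {
            promise.tryFailure(f.cause());
        }
    }
});

Using trySuccess/tryFailure rather than setFailure avoids an IllegalStateException if the timer and the connect listener race each other.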

Aggregations

Bootstrap (io.netty.bootstrap.Bootstrap): 163
Channel (io.netty.channel.Channel): 81
ServerBootstrap (io.netty.bootstrap.ServerBootstrap): 73
NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup): 68
NioSocketChannel (io.netty.channel.socket.nio.NioSocketChannel): 68
ChannelFuture (io.netty.channel.ChannelFuture): 62
EventLoopGroup (io.netty.channel.EventLoopGroup): 61
Test (org.junit.Test): 61
ChannelInboundHandlerAdapter (io.netty.channel.ChannelInboundHandlerAdapter): 49
InetSocketAddress (java.net.InetSocketAddress): 49
ChannelHandlerContext (io.netty.channel.ChannelHandlerContext): 37
SocketChannel (io.netty.channel.socket.SocketChannel): 33
ChannelPipeline (io.netty.channel.ChannelPipeline): 31
LocalAddress (io.netty.channel.local.LocalAddress): 26
ClosedChannelException (java.nio.channels.ClosedChannelException): 26
LocalChannel (io.netty.channel.local.LocalChannel): 22
LocalServerChannel (io.netty.channel.local.LocalServerChannel): 22
CountDownLatch (java.util.concurrent.CountDownLatch): 22
ChannelFutureListener (io.netty.channel.ChannelFutureListener): 20
NioServerSocketChannel (io.netty.channel.socket.nio.NioServerSocketChannel): 18