Use of io.netty.bootstrap.Bootstrap in project flink by apache.
Class NettyConnectionManagerTest, method testManualConfiguration.
/**
* Tests that the number of arenas and threads can be configured manually.
*/
@Test
public void testManualConfiguration() throws Exception {
    // Expected numbers
    int numberOfArenas = 1;
    int numberOfClientThreads = 3;
    int numberOfServerThreads = 4;
    // Configure the arena and thread counts explicitly
    Configuration flinkConfig = new Configuration();
    flinkConfig.setInteger(NettyConfig.NUM_ARENAS, numberOfArenas);
    flinkConfig.setInteger(NettyConfig.NUM_THREADS_CLIENT, numberOfClientThreads);
    flinkConfig.setInteger(NettyConfig.NUM_THREADS_SERVER, numberOfServerThreads);
    NettyConfig config = new NettyConfig(InetAddress.getLocalHost(), NetUtils.getAvailablePort(), 1024, 1337, flinkConfig);
    NettyConnectionManager connectionManager = new NettyConnectionManager(config);
    connectionManager.start(mock(ResultPartitionProvider.class), mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));
    assertEquals(numberOfArenas, connectionManager.getBufferPool().getNumberOfArenas());
    {
        // Client event loop group
        Bootstrap bootstrap = connectionManager.getClient().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfClientThreads, eventExecutors.length);
    }
    {
        // Server event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfServerThreads, eventExecutors.length);
    }
    {
        // Server child event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.childGroup();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfServerThreads, eventExecutors.length);
    }
}
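The assertions above reach into Netty internals to count threads. As a hedged, stand-alone sketch (not the Flink implementation), the configured counts boil down to the NioEventLoopGroup constructor argument used when the client Bootstrap and server ServerBootstrap are built; the class name and comments below are illustrative.

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class SizedEventLoopSketch {

    public static void main(String[] args) {
        int numberOfClientThreads = 3;
        int numberOfServerThreads = 4;
        // The constructor argument fixes the number of event loop threads per group.
        NioEventLoopGroup clientGroup = new NioEventLoopGroup(numberOfClientThreads);
        NioEventLoopGroup serverGroup = new NioEventLoopGroup(numberOfServerThreads);
        Bootstrap client = new Bootstrap()
                .group(clientGroup)
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        // client pipeline setup would go here
                    }
                });
        ServerBootstrap server = new ServerBootstrap()
                // A single group both accepts and serves connections here.
                .group(serverGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        // server pipeline setup would go here
                    }
                });
        // connect()/bind() would be called here; afterwards release the threads.
        clientGroup.shutdownGracefully();
        serverGroup.shutdownGracefully();
    }
}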
Use of io.netty.bootstrap.Bootstrap in project flink by apache.
Class NettyConnectionManagerTest, method testMatchingNumberOfArenasAndThreadsAsDefault.
/**
* Tests that the number of arenas and number of threads of the client and
* server are set to the same number, that is the number of configured
* task slots.
*/
@Test
public void testMatchingNumberOfArenasAndThreadsAsDefault() throws Exception {
    // Expected number of arenas and threads
    int numberOfSlots = 2;
    NettyConfig config = new NettyConfig(InetAddress.getLocalHost(), NetUtils.getAvailablePort(), 1024, numberOfSlots, new Configuration());
    NettyConnectionManager connectionManager = new NettyConnectionManager(config);
    connectionManager.start(mock(ResultPartitionProvider.class), mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));
    assertEquals(numberOfSlots, connectionManager.getBufferPool().getNumberOfArenas());
    {
        // Client event loop group
        Bootstrap bootstrap = connectionManager.getClient().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfSlots, eventExecutors.length);
    }
    {
        // Server event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.group();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfSlots, eventExecutors.length);
    }
    {
        // Server child event loop group
        ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
        EventLoopGroup group = bootstrap.childGroup();
        Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
        f.setAccessible(true);
        Object[] eventExecutors = (Object[]) f.get(group);
        assertEquals(numberOfSlots, eventExecutors.length);
    }
}
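Both tests repeat the same reflective lookup with a hard-coded superclass depth. As a hedged variant (not part of Flink), the lookup can walk the class hierarchy instead; the "children" field is still an internal detail of Netty's MultithreadEventExecutorGroup and may change between versions.

import io.netty.channel.EventLoopGroup;
import java.lang.reflect.Field;

public final class EventLoopIntrospection {

    private EventLoopIntrospection() {
    }

    /** Counts the event executors backing the given group via Netty-internal state. */
    public static int numberOfExecutors(EventLoopGroup group) throws Exception {
        for (Class<?> clazz = group.getClass(); clazz != null; clazz = clazz.getSuperclass()) {
            try {
                Field children = clazz.getDeclaredField("children");
                children.setAccessible(true);
                return ((Object[]) children.get(group)).length;
            } catch (NoSuchFieldException ignored) {
                // keep walking up the hierarchy
            }
        }
        throw new IllegalStateException("No 'children' field found; Netty internals may have changed");
    }
}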
Use of io.netty.bootstrap.Bootstrap in project flink by apache.
Class KvStateServerTest, method testSimpleRequest.
/**
* Tests a simple successful query via a SocketChannel.
*/
@Test
public void testSimpleRequest() throws Exception {
    KvStateServer server = null;
    Bootstrap bootstrap = null;
    try {
        KvStateRegistry registry = new KvStateRegistry();
        KvStateRequestStats stats = new AtomicKvStateRequestStats();
        server = new KvStateServer(InetAddress.getLocalHost(), 0, 1, 1, registry, stats);
        server.start();
        KvStateServerAddress serverAddress = server.getAddress();
        int numKeyGroups = 1;
        AbstractStateBackend abstractBackend = new MemoryStateBackend();
        DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
        dummyEnv.setKvStateRegistry(registry);
        AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(dummyEnv, new JobID(), "test_op", IntSerializer.INSTANCE, numKeyGroups, new KeyGroupRange(0, 0), registry.createTaskRegistry(new JobID(), new JobVertexID()));
        final KvStateServerHandlerTest.TestRegistryListener registryListener = new KvStateServerHandlerTest.TestRegistryListener();
        registry.registerListener(registryListener);
        ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
        desc.setQueryable("vanilla");
        ValueState<Integer> state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
        // Update KvState
        int expectedValue = 712828289;
        int key = 99812822;
        backend.setCurrentKey(key);
        state.update(expectedValue);
        // Request
        byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(key, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);
        // Connect to the server
        final BlockingQueue<ByteBuf> responses = new LinkedBlockingQueue<>();
        bootstrap = createBootstrap(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4), new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                responses.add((ByteBuf) msg);
            }
        });
        Channel channel = bootstrap.connect(serverAddress.getHost(), serverAddress.getPort()).sync().channel();
        long requestId = Integer.MAX_VALUE + 182828L;
        assertTrue(registryListener.registrationName.equals("vanilla"));
        ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(channel.alloc(), requestId, registryListener.kvStateId, serializedKeyAndNamespace);
        channel.writeAndFlush(request);
        ByteBuf buf = responses.poll(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
        assertEquals(KvStateRequestType.REQUEST_RESULT, KvStateRequestSerializer.deserializeHeader(buf));
        KvStateRequestResult response = KvStateRequestSerializer.deserializeKvStateRequestResult(buf);
        assertEquals(requestId, response.getRequestId());
        int actualValue = KvStateRequestSerializer.deserializeValue(response.getSerializedResult(), IntSerializer.INSTANCE);
        assertEquals(expectedValue, actualValue);
    } finally {
        if (server != null) {
            server.shutDown();
        }
        if (bootstrap != null) {
            EventLoopGroup group = bootstrap.group();
            if (group != null) {
                group.shutdownGracefully();
            }
        }
    }
}
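The createBootstrap(...) helper used above is not shown in this snippet. A minimal sketch of what such a helper could look like, assuming it only wires the supplied handlers into an NIO client pipeline; the class name and the single-thread group are assumptions, not the actual Flink test code.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

final class ClientBootstrapSketch {

    /** Hypothetical equivalent of the test's createBootstrap(...) helper. */
    static Bootstrap createBootstrap(final ChannelHandler... handlers) {
        return new Bootstrap()
                .group(new NioEventLoopGroup(1))
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        // e.g. the frame decoder followed by the response-collecting handler
                        ch.pipeline().addLast(handlers);
                    }
                });
    }
}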
Use of io.netty.bootstrap.Bootstrap in project hbase by apache.
Class FanOutOneBlockAsyncDFSOutputHelper, method connectToDataNodes.
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client, String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage, DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder().setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy)).setToken(PB_HELPER.convert(locatedBlock.getBlockToken()))).setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder().setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto).setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        Enum<?> storageType = storageTypes[i];
        Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class).option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

            @Override
            protected void initChannel(Channel ch) throws Exception {
                // We need the remote address of the channel, so we can only move on after the
                // channel has connected. Leave an empty implementation here because Netty does
                // not allow a null handler.
            }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
    }
    return futureList;
}
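Stripped of the HDFS protobuf setup, the Bootstrap usage above follows one pattern per datanode: connect with a timeout, keep the channel initializer empty, and complete a Promise from the connect listener. Below is a hedged, self-contained sketch of just that pattern; DataNodeConnector and connectWithPromise are illustrative names, not HBase API.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoop;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Promise;
import java.net.SocketAddress;

final class DataNodeConnector {

    /** Illustrative sketch: connect and expose the channel through a Promise. */
    static Promise<Channel> connectWithPromise(EventLoop eventLoop, SocketAddress address, int timeoutMs) {
        final Promise<Channel> promise = eventLoop.newPromise();
        new Bootstrap()
                .group(eventLoop)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeoutMs)
                .handler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) {
                        // Intentionally empty: the pipeline is only set up once the
                        // connect succeeds and the remote address is known.
                    }
                })
                .connect(address)
                .addListener(new ChannelFutureListener() {
                    @Override
                    public void operationComplete(ChannelFuture future) {
                        if (future.isSuccess()) {
                            promise.trySuccess(future.channel());
                        } else {
                            promise.tryFailure(future.cause());
                        }
                    }
                });
        return promise;
    }
}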
Use of io.netty.bootstrap.Bootstrap in project hive by apache.
Class Rpc, method createClient.
/**
* Creates an RPC client for a server running on the given remote host and port.
*
* @param config RPC configuration data.
* @param eloop Event loop for managing the connection.
* @param host Host name or IP address to connect to.
* @param port Port where server is listening.
* @param clientId The client ID that identifies the connection.
* @param secret Secret for authenticating the client with the server.
* @param dispatcher Dispatcher used to handle RPC calls.
* @return A future that can be used to monitor the creation of the RPC object.
*/
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host, int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();
    final ChannelFuture cf = new Bootstrap()
            .group(eloop)
            .handler(new ChannelInboundHandlerAdapter() {
            })
            .channel(NioSocketChannel.class)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs)
            .connect(host, port);
    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();
    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {

        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, connectTimeoutMs, TimeUnit.MILLISECONDS);
    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture, secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });
    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {

        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });
    return promise;
}
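The timeout handling above is a common Bootstrap pattern: schedule a watchdog on the event loop that fails the promise, and let the connect listener win the race when the connection comes up in time (the snippet above hands timeoutFuture to SaslClientHandler, presumably so it can be cancelled after the handshake). Below is a hedged, self-contained sketch of just that pattern; TimedConnect and its connect method are illustrative names, not Hive API.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Promise;
import io.netty.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class TimedConnect {

    /** Illustrative sketch: connect with a watchdog that fails the promise on timeout. */
    static Promise<Channel> connect(final NioEventLoopGroup eloop, String host, int port, int timeoutMs) {
        final Promise<Channel> result = eloop.next().newPromise();
        ChannelFuture cf = new Bootstrap()
                .group(eloop)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeoutMs)
                .handler(new ChannelInboundHandlerAdapter())
                .connect(host, port);
        // Watchdog: fail the promise if nothing happened within the timeout.
        final ScheduledFuture<?> timeout = eloop.schedule(new Runnable() {
            @Override
            public void run() {
                result.tryFailure(new TimeoutException("Timed out waiting for connection."));
            }
        }, timeoutMs, TimeUnit.MILLISECONDS);
        cf.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                timeout.cancel(false);
                if (future.isSuccess()) {
                    result.trySuccess(future.channel());
                } else {
                    result.tryFailure(future.cause());
                }
            }
        });
        return result;
    }
}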