Use of io.netty.util.concurrent.DefaultThreadFactory in project netty by netty.
The class ByteEchoClient, method main.
public static void main(String[] args) throws Exception {
    // Configure the client.
    final ThreadFactory connectFactory = new DefaultThreadFactory("connect");
    final NioEventLoopGroup connectGroup =
            new NioEventLoopGroup(1, connectFactory, NioUdtProvider.BYTE_PROVIDER);
    try {
        final Bootstrap boot = new Bootstrap();
        boot.group(connectGroup)
            .channelFactory(NioUdtProvider.BYTE_CONNECTOR)
            .handler(new ChannelInitializer<UdtChannel>() {
                @Override
                public void initChannel(final UdtChannel ch) throws Exception {
                    ch.pipeline().addLast(
                            new LoggingHandler(LogLevel.INFO),
                            new ByteEchoClientHandler());
                }
            });
        // Start the client.
        final ChannelFuture f = boot.connect(HOST, PORT).sync();
        // Wait until the connection is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down the event loop to terminate all threads.
        connectGroup.shutdownGracefully();
    }
}
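The ByteEchoClientHandler wired into the pipeline above is not reproduced on this page. As a rough, hedged sketch of what such a client-side echo handler generally looks like (the class name and the 64-byte payload below are illustrative assumptions, not the actual Netty example):

// Hypothetical sketch of a client-side echo handler; the real
// ByteEchoClientHandler in Netty's UDT examples may differ.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

public class SimpleByteEchoClientHandler extends ChannelInboundHandlerAdapter {
    // Assumed payload size for illustration only.
    private final ByteBuf message = Unpooled.buffer(64).writeZero(64);

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // Kick off the echo loop by sending the first message.
        ctx.writeAndFlush(message);
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        // Write every received message back to keep the loop going.
        ctx.write(msg);
    }

    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) {
        ctx.flush();
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        cause.printStackTrace();
        ctx.close();
    }
}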
Use of io.netty.util.concurrent.DefaultThreadFactory in project netty by netty.
The class ByteEchoServer, method main.
public static void main(String[] args) throws Exception {
    final ThreadFactory acceptFactory = new DefaultThreadFactory("accept");
    final ThreadFactory connectFactory = new DefaultThreadFactory("connect");
    final NioEventLoopGroup acceptGroup =
            new NioEventLoopGroup(1, acceptFactory, NioUdtProvider.BYTE_PROVIDER);
    final NioEventLoopGroup connectGroup =
            new NioEventLoopGroup(1, connectFactory, NioUdtProvider.BYTE_PROVIDER);
    // Configure the server.
    try {
        final ServerBootstrap boot = new ServerBootstrap();
        boot.group(acceptGroup, connectGroup)
            .channelFactory(NioUdtProvider.BYTE_ACCEPTOR)
            .option(ChannelOption.SO_BACKLOG, 10)
            .handler(new LoggingHandler(LogLevel.INFO))
            .childHandler(new ChannelInitializer<UdtChannel>() {
                @Override
                public void initChannel(final UdtChannel ch) throws Exception {
                    ch.pipeline().addLast(
                            new LoggingHandler(LogLevel.INFO),
                            new ByteEchoServerHandler());
                }
            });
        // Start the server.
        final ChannelFuture future = boot.bind(PORT).sync();
        // Wait until the server socket is closed.
        future.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        acceptGroup.shutdownGracefully();
        connectGroup.shutdownGracefully();
    }
}
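The acceptFactory and connectFactory above only control how the boss and worker threads are named (plus their daemon flag and priority). A small standalone sketch of what DefaultThreadFactory produces; the exact pool-id counter embedded in the name is an internal detail and may vary across Netty versions:

import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ThreadFactory;

public class ThreadFactoryDemo {
    public static void main(String[] args) {
        ThreadFactory acceptFactory = new DefaultThreadFactory("accept");
        Thread t = acceptFactory.newThread(
                () -> System.out.println("running in " + Thread.currentThread().getName()));
        // Thread names follow the pattern "<poolName>-<poolId>-<threadId>", e.g. "accept-1-1".
        System.out.println(t.getName());
        t.start();
    }
}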
Use of io.netty.util.concurrent.DefaultThreadFactory in project netty by netty.
The class MsgEchoPeerBase, method run.
public void run() throws Exception {
    // Configure the peer.
    final ThreadFactory connectFactory = new DefaultThreadFactory("rendezvous");
    final NioEventLoopGroup connectGroup =
            new NioEventLoopGroup(1, connectFactory, NioUdtProvider.MESSAGE_PROVIDER);
    try {
        final Bootstrap boot = new Bootstrap();
        boot.group(connectGroup)
            .channelFactory(NioUdtProvider.MESSAGE_RENDEZVOUS)
            .handler(new ChannelInitializer<UdtChannel>() {
                @Override
                public void initChannel(final UdtChannel ch) throws Exception {
                    ch.pipeline().addLast(
                            new LoggingHandler(LogLevel.INFO),
                            new MsgEchoPeerHandler(messageSize));
                }
            });
        // Start the peer.
        final ChannelFuture f = boot.connect(peer, self).sync();
        // Wait until the connection is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down the event loop to terminate all threads.
        connectGroup.shutdownGracefully();
    }
}
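Unlike the client above, the rendezvous bootstrap passes both a remote and a local address to connect(peer, self), and the two peers use mirrored address pairs. A hedged illustration with made-up local ports (the real example defines its own address constants):

import java.net.InetSocketAddress;

public class RendezvousAddresses {
    public static void main(String[] args) {
        // Hypothetical addresses for two rendezvous peers on the same host.
        InetSocketAddress peerOne = new InetSocketAddress("127.0.0.1", 1231);
        InetSocketAddress peerTwo = new InetSocketAddress("127.0.0.1", 1232);
        // Peer one would connect with (peer = peerTwo, self = peerOne);
        // peer two would connect with (peer = peerOne, self = peerTwo).
        System.out.println("peer one: connect(" + peerTwo + ", " + peerOne + ")");
        System.out.println("peer two: connect(" + peerOne + ", " + peerTwo + ")");
    }
}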
Use of io.netty.util.concurrent.DefaultThreadFactory in project netty by netty.
The class LocalTransportThreadModelTest, method testConcurrentMessageBufferAccess.
@Test(timeout = 30000)
@Ignore
public void testConcurrentMessageBufferAccess() throws Throwable {
    EventLoopGroup l = new DefaultEventLoopGroup(4, new DefaultThreadFactory("l"));
    EventExecutorGroup e1 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e1"));
    EventExecutorGroup e2 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e2"));
    EventExecutorGroup e3 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e3"));
    EventExecutorGroup e4 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e4"));
    EventExecutorGroup e5 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e5"));
    try {
        final MessageForwarder1 h1 = new MessageForwarder1();
        final MessageForwarder2 h2 = new MessageForwarder2();
        final MessageForwarder3 h3 = new MessageForwarder3();
        final MessageForwarder1 h4 = new MessageForwarder1();
        final MessageForwarder2 h5 = new MessageForwarder2();
        final MessageDiscarder h6 = new MessageDiscarder();
        final Channel ch = new LocalChannel();
        // inbound:  int -> byte[4] -> int -> int -> byte[4] -> int -> /dev/null
        // outbound: int -> int -> byte[4] -> int -> int -> byte[4] -> /dev/null
        ch.pipeline()
          .addLast(h1)
          .addLast(e1, h2)
          .addLast(e2, h3)
          .addLast(e3, h4)
          .addLast(e4, h5)
          .addLast(e5, h6);
        l.register(ch).sync().channel().connect(localAddr).sync();
        final int ROUNDS = 1024;
        final int ELEMS_PER_ROUNDS = 8192;
        final int TOTAL_CNT = ROUNDS * ELEMS_PER_ROUNDS;
        for (int i = 0; i < TOTAL_CNT; ) {
            final int start = i;
            final int end = i + ELEMS_PER_ROUNDS;
            i = end;
            ch.eventLoop().execute(new Runnable() {
                @Override
                public void run() {
                    for (int j = start; j < end; j++) {
                        ch.pipeline().fireChannelRead(Integer.valueOf(j));
                    }
                }
            });
        }
        while (h1.inCnt < TOTAL_CNT || h2.inCnt < TOTAL_CNT || h3.inCnt < TOTAL_CNT
                || h4.inCnt < TOTAL_CNT || h5.inCnt < TOTAL_CNT || h6.inCnt < TOTAL_CNT) {
            if (h1.exception.get() != null) {
                throw h1.exception.get();
            }
            if (h2.exception.get() != null) {
                throw h2.exception.get();
            }
            if (h3.exception.get() != null) {
                throw h3.exception.get();
            }
            if (h4.exception.get() != null) {
                throw h4.exception.get();
            }
            if (h5.exception.get() != null) {
                throw h5.exception.get();
            }
            if (h6.exception.get() != null) {
                throw h6.exception.get();
            }
            Thread.sleep(10);
        }
        for (int i = 0; i < TOTAL_CNT; ) {
            final int start = i;
            final int end = i + ELEMS_PER_ROUNDS;
            i = end;
            ch.pipeline().context(h6).executor().execute(new Runnable() {
                @Override
                public void run() {
                    for (int j = start; j < end; j++) {
                        ch.write(Integer.valueOf(j));
                    }
                    ch.flush();
                }
            });
        }
        while (h1.outCnt < TOTAL_CNT || h2.outCnt < TOTAL_CNT || h3.outCnt < TOTAL_CNT
                || h4.outCnt < TOTAL_CNT || h5.outCnt < TOTAL_CNT || h6.outCnt < TOTAL_CNT) {
            if (h1.exception.get() != null) {
                throw h1.exception.get();
            }
            if (h2.exception.get() != null) {
                throw h2.exception.get();
            }
            if (h3.exception.get() != null) {
                throw h3.exception.get();
            }
            if (h4.exception.get() != null) {
                throw h4.exception.get();
            }
            if (h5.exception.get() != null) {
                throw h5.exception.get();
            }
            if (h6.exception.get() != null) {
                throw h6.exception.get();
            }
            Thread.sleep(10);
        }
        ch.close().sync();
    } finally {
        l.shutdownGracefully();
        e1.shutdownGracefully();
        e2.shutdownGracefully();
        e3.shutdownGracefully();
        e4.shutdownGracefully();
        e5.shutdownGracefully();
        l.terminationFuture().sync();
        e1.terminationFuture().sync();
        e2.terminationFuture().sync();
        e3.terminationFuture().sync();
        e4.terminationFuture().sync();
        e5.terminationFuture().sync();
    }
}
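The finally block shows the full teardown pattern for the DefaultThreadFactory-named groups: shutdownGracefully() only initiates shutdown, so the test also waits on terminationFuture() to make sure every "l"/"e1".."e5" worker thread has actually exited. A minimal standalone sketch of the same pattern:

import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.concurrent.EventExecutorGroup;

public class ShutdownDemo {
    public static void main(String[] args) throws Exception {
        EventExecutorGroup group =
                new DefaultEventExecutorGroup(2, new DefaultThreadFactory("demo"));
        group.submit(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        group.shutdownGracefully();        // asynchronous: returns immediately
        group.terminationFuture().sync();  // block until all "demo-*" threads are gone
    }
}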
Use of io.netty.util.concurrent.DefaultThreadFactory in project pulsar by yahoo.
The class MockZooKeeper, method init.
private void init(ExecutorService executor) {
    tree = Maps.newTreeMap();
    if (executor != null) {
        this.executor = executor;
    } else {
        this.executor = Executors.newFixedThreadPool(1, new DefaultThreadFactory("mock-zookeeper"));
    }
    SetMultimap<String, Watcher> w = HashMultimap.create();
    watchers = Multimaps.synchronizedSetMultimap(w);
    stopped = false;
    stepsToFail = new AtomicInteger(-1);
    failReturnCode = KeeperException.Code.OK;
}
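When no executor is injected, the mock falls back to a single worker thread named by DefaultThreadFactory("mock-zookeeper"). A hedged sketch of the two paths (class and variable names here are illustrative, not part of the Pulsar API):

import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ExecutorChoiceDemo {
    public static void main(String[] args) {
        // Default path: one worker thread named roughly "mock-zookeeper-1-1".
        ExecutorService defaultExecutor =
                Executors.newFixedThreadPool(1, new DefaultThreadFactory("mock-zookeeper"));

        // Injected path: a caller-supplied executor, e.g. for deterministic tests.
        ExecutorService injected = Executors.newSingleThreadExecutor();

        defaultExecutor.submit(() -> System.out.println(Thread.currentThread().getName()));
        defaultExecutor.shutdown();
        injected.shutdown();
    }
}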