Example 26 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project grpc-java by grpc.

The class AsyncServer, method newServer:

static Server newServer(ServerConfiguration config) throws IOException {
    final EventLoopGroup boss;
    final EventLoopGroup worker;
    final Class<? extends ServerChannel> channelType;
    ThreadFactory tf = new DefaultThreadFactory("server-elg-", true);
    switch(config.transport) {
        case NETTY_NIO:
            {
                boss = new NioEventLoopGroup(1, tf);
                worker = new NioEventLoopGroup(0, tf);
                channelType = NioServerSocketChannel.class;
                break;
            }
        case NETTY_EPOLL:
            {
                try {
                    // These classes are only available on linux.
                    Class<?> groupClass = Class.forName("io.netty.channel.epoll.EpollEventLoopGroup");
                    @SuppressWarnings("unchecked") Class<? extends ServerChannel> channelClass = (Class<? extends ServerChannel>) Class.forName("io.netty.channel.epoll.EpollServerSocketChannel");
                    boss = (EventLoopGroup) groupClass.getConstructor(int.class, ThreadFactory.class).newInstance(1, tf);
                    worker = (EventLoopGroup) groupClass.getConstructor(int.class, ThreadFactory.class).newInstance(0, tf);
                    channelType = channelClass;
                    break;
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        case NETTY_UNIX_DOMAIN_SOCKET:
            {
                try {
                    // These classes are only available on linux.
                    Class<?> groupClass = Class.forName("io.netty.channel.epoll.EpollEventLoopGroup");
                    @SuppressWarnings("unchecked") Class<? extends ServerChannel> channelClass = (Class<? extends ServerChannel>) Class.forName("io.netty.channel.epoll.EpollServerDomainSocketChannel");
                    boss = (EventLoopGroup) groupClass.getConstructor(int.class, ThreadFactory.class).newInstance(1, tf);
                    worker = (EventLoopGroup) groupClass.getConstructor(int.class, ThreadFactory.class).newInstance(0, tf);
                    channelType = channelClass;
                    break;
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        default:
            {
                // Should never get here.
                throw new IllegalArgumentException("Unsupported transport: " + config.transport);
            }
    }
    NettyServerBuilder builder = NettyServerBuilder.forAddress(config.address).bossEventLoopGroup(boss).workerEventLoopGroup(worker).channelType(channelType).addService(new BenchmarkServiceImpl()).flowControlWindow(config.flowControlWindow);
    if (config.tls) {
        System.out.println("Using fake CA for TLS certificate.\n" + "Run the Java client with --tls --testca");
        File cert = TestUtils.loadCert("server1.pem");
        File key = TestUtils.loadCert("server1.key");
        builder.useTransportSecurity(cert, key);
    }
    if (config.directExecutor) {
        builder.directExecutor();
    } else {
        // TODO(carl-mastrangelo): This should not be necessary.  I don't know where this should be
        // put.  Move it somewhere else, or remove it if no longer necessary.
        // See: https://github.com/grpc/grpc-java/issues/2119
        builder.executor(new ForkJoinPool(Runtime.getRuntime().availableProcessors(), new ForkJoinWorkerThreadFactory() {

            final AtomicInteger num = new AtomicInteger();

            @Override
            public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
                ForkJoinWorkerThread thread = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
                thread.setDaemon(true);
                thread.setName("grpc-server-app-" + num.getAndIncrement());
                return thread;
            }
        }, UncaughtExceptionHandlers.systemExit(), true));
    }
    return builder.build();
}
Also used : DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) ThreadFactory(java.util.concurrent.ThreadFactory) ForkJoinWorkerThreadFactory(java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory) NioServerSocketChannel(io.netty.channel.socket.nio.NioServerSocketChannel) NettyServerBuilder(io.grpc.netty.NettyServerBuilder) ServerChannel(io.netty.channel.ServerChannel) IOException(java.io.IOException) EventLoopGroup(io.netty.channel.EventLoopGroup) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ForkJoinWorkerThread(java.util.concurrent.ForkJoinWorkerThread) File(java.io.File) ForkJoinPool(java.util.concurrent.ForkJoinPool)
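
The factory above is shared by the boss and worker groups, so every event-loop thread gets the server-elg- prefix and daemon status. As a minimal, self-contained sketch of what DefaultThreadFactory(String, boolean) does on its own (ThreadFactoryDemo is made up for illustration, not part of grpc-java), Netty names each thread poolName-poolId-threadId:

import io.netty.util.concurrent.DefaultThreadFactory;

public class ThreadFactoryDemo {
    public static void main(String[] args) throws InterruptedException {
        // Daemon factory; threads are named "<poolName>-<poolId>-<threadId>"
        DefaultThreadFactory tf = new DefaultThreadFactory("demo-pool", true);
        Thread t = tf.newThread(() -> System.out.println(Thread.currentThread().getName()));
        t.start();
        // join() so the JVM does not exit before the daemon thread prints
        t.join();
    }
}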

Example 27 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project pulsar by yahoo.

The class MockZooKeeper, method init:

private void init(ExecutorService executor) {
    tree = Maps.newTreeMap();
    if (executor != null) {
        this.executor = executor;
    } else {
        this.executor = Executors.newFixedThreadPool(1, new DefaultThreadFactory("mock-zookeeper"));
    }
    SetMultimap<String, Watcher> w = HashMultimap.create();
    watchers = Multimaps.synchronizedSetMultimap(w);
    stopped = false;
    stepsToFail = new AtomicInteger(-1);
    failReturnCode = KeeperException.Code.OK;
}
Also used : DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
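
Passing the factory to Executors.newFixedThreadPool(1, ...) gives MockZooKeeper a single, recognizably named thread, so callbacks run one at a time in submission order, much like ZooKeeper's own event thread. A small sketch of the same setup (SingleThreadDemo and the task bodies are made up for illustration):

import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class SingleThreadDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(1, new DefaultThreadFactory("mock-zookeeper"));
        // Tasks run one at a time, in submission order, on the same named thread
        for (int i = 0; i < 3; i++) {
            final int n = i;
            executor.execute(() -> System.out.println(Thread.currentThread().getName() + " ran task " + n));
        }
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}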

Example 28 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project pulsar by yahoo.

The class PerformanceClient, method runPerformanceTest:

public void runPerformanceTest(long messages, long limit, int numOfTopic, int sizeOfMessage, String baseUrl, String destination) throws InterruptedException, FileNotFoundException {
    ExecutorService executor = Executors.newCachedThreadPool(new DefaultThreadFactory("pulsar-perf-producer-exec"));
    HashMap<String, Tuple> producersMap = new HashMap<>();
    String produceBaseEndPoint = baseUrl + destination;
    for (int i = 0; i < numOfTopic; i++) {
        String topic = produceBaseEndPoint + i + "/";
        URI produceUri = URI.create(topic);
        WebSocketClient produceClient = new WebSocketClient(new SslContextFactory(true));
        ClientUpgradeRequest produceRequest = new ClientUpgradeRequest();
        SimpleTestProducerSocket produceSocket = new SimpleTestProducerSocket();
        try {
            produceClient.start();
            produceClient.connect(produceSocket, produceUri, produceRequest);
        } catch (IOException e1) {
            log.error("Fail in connecting: [{}]", e1.getMessage());
            return;
        } catch (Exception e1) {
            log.error("Fail in starting client[{}]", e1.getMessage());
            return;
        }
        producersMap.put(produceUri.toString(), new Tuple(produceClient, produceRequest, produceSocket));
    }
    // Wait a few seconds for the connections to be established
    TimeUnit.SECONDS.sleep(5);
    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(limit);
            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (String topic : producersMap.keySet()) {
                    if (messages > 0) {
                        if (totalSent++ >= messages) {
                            log.trace("------------------- DONE -----------------------");
                            Thread.sleep(10000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();
                    if (producersMap.get(topic).getSocket().getSession() == null) {
                        Thread.sleep(10000);
                        System.exit(0);
                    }
                    producersMap.get(topic).getSocket().sendMsg(String.valueOf(totalSent), sizeOfMessage);
                    messagesSent.increment();
                    bytesSent.add(sizeOfMessage);
                }
            }
        } catch (Throwable t) {
            log.error(t.getMessage());
            System.exit(0);
        }
    });
    // Print report stats
    long oldTime = System.nanoTime();
    Histogram reportHistogram = null;
    String statsFileName = "perf-websocket-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to %s \n", statsFileName);
    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);
    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();
    while (true) {
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            break;
        }
        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;
        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;
        reportHistogram = SimpleTestProducerSocket.recorder.getIntervalHistogram(reportHistogram);
        log.info("Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} ms - 95pct: {} ms - 99pct: {} ms - 99.9pct: {} ms - 99.99pct: {} ms", throughputFormat.format(rate), throughputFormat.format(throughput), dec.format(reportHistogram.getMean() / 1000.0), dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0));
        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();
        oldTime = now;
    }
    TimeUnit.SECONDS.sleep(100);
    executor.shutdown();
}
Also used : HistogramLogWriter(org.HdrHistogram.HistogramLogWriter) PrintStream(java.io.PrintStream) Histogram(org.HdrHistogram.Histogram) HashMap(java.util.HashMap) IOException(java.io.IOException) WebSocketClient(org.eclipse.jetty.websocket.client.WebSocketClient) URI(java.net.URI) ParameterException(com.beust.jcommander.ParameterException) FileNotFoundException(java.io.FileNotFoundException) RateLimiter(com.google.common.util.concurrent.RateLimiter) DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) SslContextFactory(org.eclipse.jetty.util.ssl.SslContextFactory) FileOutputStream(java.io.FileOutputStream) ExecutorService(java.util.concurrent.ExecutorService) ClientUpgradeRequest(org.eclipse.jetty.websocket.client.ClientUpgradeRequest)
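
The send loop is paced with Guava's RateLimiter: create(limit) sets a permits-per-second budget and acquire() blocks the producer until a permit is available. A self-contained sketch of that pattern (RateLimiterDemo and the 100 permits/s figure are illustrative):

import com.google.common.util.concurrent.RateLimiter;

public class RateLimiterDemo {
    public static void main(String[] args) {
        // Allow at most 100 operations per second; acquire() blocks until a permit is free
        RateLimiter limiter = RateLimiter.create(100.0);
        for (int i = 0; i < 5; i++) {
            // acquire() returns the time spent waiting, in seconds
            double waitedSeconds = limiter.acquire();
            System.out.println("sent message " + i + " after waiting " + waitedSeconds + "s");
        }
    }
}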

Example 29 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project pulsar by yahoo.

The class PerformanceConsumer, method main:

public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-consumer");
    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }
    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }
    if (arguments.topic.size() != 1) {
        System.out.println("Only one destination name is allowed");
        jc.usage();
        System.exit(-1);
    }
    if (arguments.confFile != null) {
        Properties prop = new Properties(System.getProperties());
        prop.load(new FileInputStream(arguments.confFile));
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("brokerServiceUrl");
        }
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("webServiceUrl");
        }
        // fallback to previous-version serviceUrl property to maintain backward-compatibility
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/");
        }
        if (arguments.authPluginClassName == null) {
            arguments.authPluginClassName = prop.getProperty("authPlugin", null);
        }
        if (arguments.authParams == null) {
            arguments.authParams = prop.getProperty("authParams", null);
        }
    }
    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar performance consumer with config: {}", w.writeValueAsString(arguments));
    final DestinationName prefixDestinationName = DestinationName.get(arguments.topic.get(0));
    final RateLimiter limiter = arguments.rate > 0 ? RateLimiter.create(arguments.rate) : null;
    MessageListener listener = new MessageListener() {

        public void received(Consumer consumer, Message msg) {
            messagesReceived.increment();
            bytesReceived.add(msg.getData().length);
            if (limiter != null) {
                limiter.acquire();
            }
            consumer.acknowledgeAsync(msg);
        }
    };
    EventLoopGroup eventLoopGroup;
    if (SystemUtils.IS_OS_LINUX) {
        eventLoopGroup = new EpollEventLoopGroup(Runtime.getRuntime().availableProcessors() * 2, new DefaultThreadFactory("pulsar-perf-consumer"));
    } else {
        eventLoopGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors(), new DefaultThreadFactory("pulsar-perf-consumer"));
    }
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setConnectionsPerBroker(arguments.maxConnections);
    clientConf.setStatsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS);
    if (isNotBlank(arguments.authPluginClassName)) {
        clientConf.setAuthentication(arguments.authPluginClassName, arguments.authParams);
    }
    PulsarClient pulsarClient = new PulsarClientImpl(arguments.serviceURL, clientConf, eventLoopGroup);
    List<Future<Consumer>> futures = Lists.newArrayList();
    ConsumerConfiguration consumerConfig = new ConsumerConfiguration();
    consumerConfig.setMessageListener(listener);
    consumerConfig.setReceiverQueueSize(arguments.receiverQueueSize);
    for (int i = 0; i < arguments.numDestinations; i++) {
        final DestinationName destinationName = (arguments.numDestinations == 1) ? prefixDestinationName : DestinationName.get(String.format("%s-%d", prefixDestinationName, i));
        log.info("Adding {} consumers on destination {}", arguments.numConsumers, destinationName);
        for (int j = 0; j < arguments.numConsumers; j++) {
            String subscriberName;
            if (arguments.numConsumers > 1) {
                subscriberName = String.format("%s-%d", arguments.subscriberName, j);
            } else {
                subscriberName = arguments.subscriberName;
            }
            futures.add(pulsarClient.subscribeAsync(destinationName.toString(), subscriberName, consumerConfig));
        }
    }
    for (Future<Consumer> future : futures) {
        future.get();
    }
    log.info("Start receiving from {} consumers on {} destinations", arguments.numConsumers, arguments.numDestinations);
    long oldTime = System.nanoTime();
    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }
        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;
        double rate = messagesReceived.sumThenReset() / elapsed;
        double throughput = bytesReceived.sumThenReset() / elapsed * 8 / 1024 / 1024;
        log.info("Throughput received: {}  msg/s -- {} Mbit/s", dec.format(rate), dec.format(throughput));
        oldTime = now;
    }
    pulsarClient.close();
}
Also used : Message(com.yahoo.pulsar.client.api.Message) MessageListener(com.yahoo.pulsar.client.api.MessageListener) Properties(java.util.Properties) DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) Consumer(com.yahoo.pulsar.client.api.Consumer) JCommander(com.beust.jcommander.JCommander) DestinationName(com.yahoo.pulsar.common.naming.DestinationName) ConsumerConfiguration(com.yahoo.pulsar.client.api.ConsumerConfiguration) ParameterException(com.beust.jcommander.ParameterException) PulsarClient(com.yahoo.pulsar.client.api.PulsarClient) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) ObjectWriter(com.fasterxml.jackson.databind.ObjectWriter) FileInputStream(java.io.FileInputStream) RateLimiter(com.google.common.util.concurrent.RateLimiter) EpollEventLoopGroup(io.netty.channel.epoll.EpollEventLoopGroup) EventLoopGroup(io.netty.channel.EventLoopGroup) Future(java.util.concurrent.Future) PulsarClientImpl(com.yahoo.pulsar.client.impl.PulsarClientImpl) ClientConfiguration(com.yahoo.pulsar.client.api.ClientConfiguration)
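
This example picks the transport with SystemUtils.IS_OS_LINUX; Netty also exposes its own runtime check that additionally verifies the native epoll library loads. A hedged sketch of the equivalent selection using Epoll.isAvailable() (TransportSelector is a made-up helper; the thread counts mirror the example above):

import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.DefaultThreadFactory;

public class TransportSelector {
    static EventLoopGroup newEventLoopGroup(String name) {
        int cores = Runtime.getRuntime().availableProcessors();
        // Epoll.isAvailable() checks that the native library actually loads, not just the OS
        return Epoll.isAvailable()
                ? new EpollEventLoopGroup(cores * 2, new DefaultThreadFactory(name))
                : new NioEventLoopGroup(cores, new DefaultThreadFactory(name));
    }
}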

Example 30 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project transporter by wang4ever.

The class NettyChannelServer, method listen:

/**
 * Start the listening service.
 *
 * @throws Exception
 * @since
 */
private void listen() throws Exception {
    if (this.startup) {
        logger.info("Netty server has been started({}).", this.port);
        return;
    }
    this.startup = true;
    // Bootstrap the server
    ServerBootstrap bootstrap = new ServerBootstrap();
    // Event loop groups: masters accept client connections and hand them to slaves; slaves handle the per-connection I/O
    EventLoopGroup masters = new NioEventLoopGroup(1, new DefaultThreadFactory("NettyServerMaster", true));
    // With 0 threads, Netty defaults to availableProcessors() * 2
    EventLoopGroup worker = new NioEventLoopGroup(0, new DefaultThreadFactory("NettyServerWorker", true));
    try {
        bootstrap.group(masters, worker);
        // Use the NIO server channel type
        bootstrap.channel(NioServerSocketChannel.class);
        // TCP_NODELAY: disable Nagle's algorithm so writes are sent immediately instead of waiting to batch
        bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
        // SO_REUSEADDR: allow rebinding an address that is still in TIME_WAIT and not fully closed
        // https://www.cnblogs.com/zemliu/p/3692996.html
        bootstrap.childOption(ChannelOption.SO_REUSEADDR, true);
        // ByteBuf allocator. The two main implementations are UnpooledByteBufAllocator and
        // PooledByteBufAllocator; the pooled one reuses previously allocated buffers, which
        // noticeably reduces memory use, and is the default in newer Netty releases.
        bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        // SO_KEEPALIVE: keep child (slave) connections alive for long-lived sessions
        bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
        // SO_BACKLOG: size of the TCP SYN/accept queues used by NioServerSocketChannel (the boss
        // thread) for incoming connections; the effective limit also depends on the OS TCP settings
        // https://www.jianshu.com/p/e6f2036621f4
        bootstrap.option(ChannelOption.SO_BACKLOG, backlog);
        // Handler pipeline for the slave (child) channels
        bootstrap.childHandler(this.handlerInitializer);
        // Bind the host and port and serve asynchronously
        ChannelFuture f = bootstrap.bind(this.hostname, this.port).sync();
        f.addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                Throwable t = future.cause();
                if (future.isSuccess())
                    logger.info("Netty started on port(s): {} ({})", hostname + ":" + port, name);
                else
                    logger.error(t.getMessage(), t);
            }
        });
        // The thread begins to wait here unless there is a socket event
        // wake-up.
        f.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        logger.error("Netty server start failed.", e);
        throw e;
    } finally {
        // Graceful shutdown
        masters.shutdownGracefully();
        worker.shutdownGracefully();
        if (logger.isInfoEnabled())
            logger.info("Netty Server Stop Gracefully !");
    }
}
Also used : DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) ChannelFuture(io.netty.channel.ChannelFuture) EventLoopGroup(io.netty.channel.EventLoopGroup) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) ChannelFutureListener(io.netty.channel.ChannelFutureListener) ServerBootstrap(io.netty.bootstrap.ServerBootstrap)
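
The bootstrap installs this.handlerInitializer on child channels but the example never shows it. In Netty that role is normally filled by a ChannelInitializer; the following minimal version is an assumption, not the transporter project's actual handler, and simply echoes received data back to the client:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;

public class HandlerInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        // A fresh handler instance per connection; real projects also add codecs here
        ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                // Echo whatever bytes arrive back to the client
                ctx.writeAndFlush(msg);
            }
        });
    }
}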

Aggregations

DefaultThreadFactory (io.netty.util.concurrent.DefaultThreadFactory): 49
NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup): 24
EventLoopGroup (io.netty.channel.EventLoopGroup): 15
ThreadFactory (java.util.concurrent.ThreadFactory): 12
ChannelFuture (io.netty.channel.ChannelFuture): 9
ExecutorService (java.util.concurrent.ExecutorService): 8
Test (org.junit.jupiter.api.Test): 8
EpollEventLoopGroup (io.netty.channel.epoll.EpollEventLoopGroup): 7
IOException (java.io.IOException): 7
ServerBootstrap (io.netty.bootstrap.ServerBootstrap): 6
NioServerSocketChannel (io.netty.channel.socket.nio.NioServerSocketChannel): 6
UdtChannel (io.netty.channel.udt.UdtChannel): 6
LoggingHandler (io.netty.handler.logging.LoggingHandler): 6
CountDownLatch (java.util.concurrent.CountDownLatch): 6
RateLimiter (com.google.common.util.concurrent.RateLimiter): 5
Channel (io.netty.channel.Channel): 5
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 5
Timeout (org.junit.jupiter.api.Timeout): 5
Test (org.testng.annotations.Test): 5
ParameterException (com.beust.jcommander.ParameterException): 4