
Example 36 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project bookkeeper by apache.

From the class UpdateLedgerOp, method updateBookieIdInLedgers.

/**
 * Update the bookie id present in the ledger metadata.
 *
 * @param oldBookieId
 *            current bookie id
 * @param newBookieId
 *            new bookie id
 * @param rate
 *            number of ledger updates to issue per second (default: 5)
 * @param limit
 *            maximum number of ledgers to update (default: no limit);
 *            updating stops once the limit is reached
 * @param progressable
 *            report progress of the ledger updates
 * @throws IOException
 *             if there is an error while updating the bookie id in the
 *             ledger metadata, or if the update is interrupted (the
 *             InterruptedException is wrapped in the IOException)
 */
public void updateBookieIdInLedgers(final BookieSocketAddress oldBookieId, final BookieSocketAddress newBookieId, final int rate, final int limit, final UpdateLedgerNotifier progressable) throws IOException {
    final ExecutorService executor = Executors.newSingleThreadExecutor(new DefaultThreadFactory("UpdateLedgerThread", true));
    final AtomicInteger issuedLedgerCnt = new AtomicInteger();
    final AtomicInteger updatedLedgerCnt = new AtomicInteger();
    final Future<?> updateBookieCb = executor.submit(new Runnable() {

        @Override
        public void run() {
            updateLedgers(oldBookieId, newBookieId, rate, limit, progressable);
        }

        private void updateLedgers(final BookieSocketAddress oldBookieId, final BookieSocketAddress newBookieId, final int rate, final int limit, final UpdateLedgerNotifier progressable) {
            try {
                final AtomicBoolean stop = new AtomicBoolean(false);
                final Set<Long> outstandings = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
                final RateLimiter throttler = RateLimiter.create(rate);
                final Iterator<Long> ledgerItr = admin.listLedgers().iterator();
                final CountDownLatch syncObj = new CountDownLatch(1);
                // iterate through all the ledgers
                while (ledgerItr.hasNext() && !stop.get()) {
                    // throttler to control updates per second
                    throttler.acquire();
                    final Long lId = ledgerItr.next();
                    final ReadLedgerMetadataCb readCb = new ReadLedgerMetadataCb(bkc, lId, oldBookieId, newBookieId);
                    outstandings.add(lId);
                    FutureCallback<Void> updateLedgerCb = new UpdateLedgerCb(lId, stop, issuedLedgerCnt, updatedLedgerCnt, outstandings, syncObj, progressable);
                    Futures.addCallback(readCb.getFutureListener(), updateLedgerCb);
                    issuedLedgerCnt.incrementAndGet();
                    if ((limit != Integer.MIN_VALUE && issuedLedgerCnt.get() >= limit) || !ledgerItr.hasNext()) {
                        stop.set(true);
                    }
                    bkc.getLedgerManager().readLedgerMetadata(lId, readCb);
                }
                // wait until all the issued ledger updates have finished
                syncObj.await();
            } catch (IOException ioe) {
                LOG.error("Exception while updating ledger", ioe);
                throw new RuntimeException("Exception while updating ledger", ioe);
            } catch (InterruptedException ie) {
                LOG.error("Exception while updating ledger metadata", ie);
                Thread.currentThread().interrupt();
                throw new RuntimeException("Exception while updating ledger", ie);
            }
        }
    });
    try {
        // Wait for the issued ledger updates to finish.
        updateBookieCb.get();
    } catch (ExecutionException ee) {
        throw new IOException("Exception while updating ledger", ee);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new IOException("Exception while updating ledger", ie);
    } finally {
        executor.shutdown();
    }
}
Also used : Set(java.util.Set) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) RateLimiter(com.google.common.util.concurrent.RateLimiter) DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BookieSocketAddress(org.apache.bookkeeper.net.BookieSocketAddress) ExecutorService(java.util.concurrent.ExecutorService) Iterator(java.util.Iterator) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ExecutionException(java.util.concurrent.ExecutionException) FutureCallback(com.google.common.util.concurrent.FutureCallback) UpdateLedgerNotifier(org.apache.bookkeeper.bookie.BookieShell.UpdateLedgerNotifier)
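
The example above pairs a single-threaded daemon executor (the boolean true passed to DefaultThreadFactory marks its threads as daemon) with a Guava RateLimiter so ledger updates are issued at a bounded rate. Below is a minimal standalone sketch of the same combination, assuming only Netty and Guava on the classpath; the class name and the ten-task loop are illustrative:

import com.google.common.util.concurrent.RateLimiter;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ThrottledWorkerSketch {
    public static void main(String[] args) throws Exception {
        // Daemon thread (second constructor argument), so a stuck worker
        // cannot keep the JVM alive on shutdown.
        ExecutorService executor = Executors.newSingleThreadExecutor(
                new DefaultThreadFactory("UpdateLedgerThread", true));
        RateLimiter throttler = RateLimiter.create(5.0); // 5 permits per second
        executor.submit(() -> {
            for (int i = 0; i < 10; i++) {
                throttler.acquire(); // blocks until a permit is available
                System.out.println(Thread.currentThread().getName() + " -> task " + i);
            }
        });
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}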

Example 37 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project bookkeeper by apache.

From the class TestGetBookieInfoTimeout, method setUp.

@Before
public void setUp() throws Exception {
    super.setUp();
    eventLoopGroup = new NioEventLoopGroup();
    executor = OrderedExecutor.newBuilder().name("BKClientOrderedSafeExecutor").numThreads(2).build();
    scheduler = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("BookKeeperClientScheduler"));
}
Also used : DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) Before(org.junit.Before)
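
Besides the daemon flag, DefaultThreadFactory gives threads predictable names, which makes the client scheduler easy to identify in thread dumps. A small sketch of just the naming behavior; the "<poolName>-<poolId>-<threadId>" pattern in the comment reflects Netty's convention and is noted here as an assumption:

import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ThreadFactory;

public class ThreadNamingSketch {
    public static void main(String[] args) throws Exception {
        ThreadFactory factory = new DefaultThreadFactory("BookKeeperClientScheduler");
        Runnable printName = () -> System.out.println(Thread.currentThread().getName());
        Thread t1 = factory.newThread(printName);
        Thread t2 = factory.newThread(printName);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // Expected names follow the "<poolName>-<poolId>-<threadId>" pattern,
        // e.g. "BookKeeperClientScheduler-1-1" and "BookKeeperClientScheduler-1-2".
    }
}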

Example 38 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project grpc-java by grpc.

From the class AbstractBenchmark, method setup.

/**
 * Initialize the environment for the executor.
 */
public void setup(ExecutorType clientExecutor, ExecutorType serverExecutor, MessageSize requestSize, MessageSize responseSize, FlowWindowSize windowSize, ChannelType channelType, int maxConcurrentStreams, int channelCount) throws Exception {
    ServerCredentials serverCreds = InsecureServerCredentials.create();
    NettyServerBuilder serverBuilder;
    NettyChannelBuilder channelBuilder;
    if (channelType == ChannelType.LOCAL) {
        LocalAddress address = new LocalAddress("netty-e2e-benchmark");
        serverBuilder = NettyServerBuilder.forAddress(address, serverCreds);
        serverBuilder.channelType(LocalServerChannel.class);
        channelBuilder = NettyChannelBuilder.forAddress(address);
        channelBuilder.channelType(LocalChannel.class);
    } else {
        ServerSocket sock = new ServerSocket();
        // Pick a port using an ephemeral socket.
        sock.bind(new InetSocketAddress(BENCHMARK_ADDR, 0));
        SocketAddress address = sock.getLocalSocketAddress();
        sock.close();
        serverBuilder = NettyServerBuilder.forAddress(address, serverCreds).channelType(NioServerSocketChannel.class);
        channelBuilder = NettyChannelBuilder.forAddress(address).channelType(NioSocketChannel.class);
    }
    if (serverExecutor == ExecutorType.DIRECT) {
        serverBuilder.directExecutor();
    }
    if (clientExecutor == ExecutorType.DIRECT) {
        channelBuilder.directExecutor();
    }
    // Always use a different worker group from the client.
    ThreadFactory serverThreadFactory = new DefaultThreadFactory("STF pool", true);
    serverBuilder.workerEventLoopGroup(new NioEventLoopGroup(0, serverThreadFactory));
    serverBuilder.bossEventLoopGroup(new NioEventLoopGroup(1, serverThreadFactory));
    // Always set the connection and stream window sizes to the same value.
    serverBuilder.flowControlWindow(windowSize.bytes());
    channelBuilder.flowControlWindow(windowSize.bytes());
    channelBuilder.negotiationType(NegotiationType.PLAINTEXT);
    serverBuilder.maxConcurrentCallsPerConnection(maxConcurrentStreams);
    // Create buffers of the desired size for requests and responses.
    PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
    // Use a heap buffer for now, since MessageFramer doesn't know how to directly convert this
    // into a WritableBuffer
    // TODO(carl-mastrangelo): convert this into a regular buffer() call.  See
    // https://github.com/grpc/grpc-java/issues/2062#issuecomment-234646216
    request = alloc.heapBuffer(requestSize.bytes());
    request.writerIndex(request.capacity() - 1);
    response = alloc.heapBuffer(responseSize.bytes());
    response.writerIndex(response.capacity() - 1);
    // Simple unary method descriptor that sends and receives Netty ByteBufs.
    unaryMethod = MethodDescriptor.<ByteBuf, ByteBuf>newBuilder().setType(MethodType.UNARY).setFullMethodName("benchmark/unary").setRequestMarshaller(new ByteBufOutputMarshaller()).setResponseMarshaller(new ByteBufOutputMarshaller()).build();
    pingPongMethod = unaryMethod.toBuilder().setType(MethodType.BIDI_STREAMING).setFullMethodName("benchmark/pingPong").build();
    flowControlledStreaming = pingPongMethod.toBuilder().setFullMethodName("benchmark/flowControlledStreaming").build();
    // Server implementation of unary & streaming methods
    serverBuilder.addService(ServerServiceDefinition.builder(new ServiceDescriptor("benchmark", unaryMethod, pingPongMethod, flowControlledStreaming)).addMethod(unaryMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {

        @Override
        public ServerCall.Listener<ByteBuf> startCall(final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
            call.sendHeaders(new Metadata());
            call.request(1);
            return new ServerCall.Listener<ByteBuf>() {

                @Override
                public void onMessage(ByteBuf message) {
                    // Consume the request and echo a response of the configured size.
                    message.release();
                    call.sendMessage(response.slice());
                }

                @Override
                public void onHalfClose() {
                    call.close(Status.OK, new Metadata());
                }

                @Override
                public void onCancel() {
                }

                @Override
                public void onComplete() {
                }
            };
        }
    }).addMethod(pingPongMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {

        @Override
        public ServerCall.Listener<ByteBuf> startCall(final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
            call.sendHeaders(new Metadata());
            call.request(1);
            return new ServerCall.Listener<ByteBuf>() {

                @Override
                public void onMessage(ByteBuf message) {
                    message.release();
                    call.sendMessage(response.slice());
                    // Request next message
                    call.request(1);
                }

                @Override
                public void onHalfClose() {
                    call.close(Status.OK, new Metadata());
                }

                @Override
                public void onCancel() {
                }

                @Override
                public void onComplete() {
                }
            };
        }
    }).addMethod(flowControlledStreaming, new ServerCallHandler<ByteBuf, ByteBuf>() {

        @Override
        public ServerCall.Listener<ByteBuf> startCall(final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
            call.sendHeaders(new Metadata());
            call.request(1);
            return new ServerCall.Listener<ByteBuf>() {

                @Override
                public void onMessage(ByteBuf message) {
                    message.release();
                    while (call.isReady()) {
                        call.sendMessage(response.slice());
                    }
                    // Request next message
                    call.request(1);
                }

                @Override
                public void onHalfClose() {
                    call.close(Status.OK, new Metadata());
                }

                @Override
                public void onCancel() {
                }

                @Override
                public void onComplete() {
                }

                @Override
                public void onReady() {
                    while (call.isReady()) {
                        call.sendMessage(response.slice());
                    }
                }
            };
        }
    }).build());
    // Build and start the clients and servers
    server = serverBuilder.build();
    server.start();
    channels = new ManagedChannel[channelCount];
    ThreadFactory clientThreadFactory = new DefaultThreadFactory("CTF pool", true);
    for (int i = 0; i < channelCount; i++) {
        // Use a dedicated event-loop for each channel
        channels[i] = channelBuilder.eventLoopGroup(new NioEventLoopGroup(1, clientThreadFactory)).build();
    }
}
Also used : DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) ThreadFactory(java.util.concurrent.ThreadFactory) ServerCallHandler(io.grpc.ServerCallHandler) InetSocketAddress(java.net.InetSocketAddress) Metadata(io.grpc.Metadata) ByteBuf(io.netty.buffer.ByteBuf) PooledByteBufAllocator(io.netty.buffer.PooledByteBufAllocator) ServiceDescriptor(io.grpc.ServiceDescriptor) ServerCall(io.grpc.ServerCall) NettyChannelBuilder(io.grpc.netty.NettyChannelBuilder) SocketAddress(java.net.SocketAddress) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) NettyServerBuilder(io.grpc.netty.NettyServerBuilder) LocalAddress(io.netty.channel.local.LocalAddress) NioServerSocketChannel(io.netty.channel.socket.nio.NioServerSocketChannel) ServerCredentials(io.grpc.ServerCredentials) InsecureServerCredentials(io.grpc.InsecureServerCredentials) ServerSocket(java.net.ServerSocket) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) ByteBufOutputMarshaller(io.grpc.benchmarks.ByteBufOutputMarshaller)
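
One detail worth isolating from this benchmark: the boss and worker event loops each get a named, daemon DefaultThreadFactory, so the benchmark JVM can still exit if a group is never shut down, and thread dumps cleanly separate server threads from client threads. A reduced sketch of just that wiring; the group names and the empty try body are placeholders:

import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.DefaultThreadFactory;

public class EventLoopGroupSketch {
    public static void main(String[] args) {
        // One boss thread is enough for accepting connections;
        // 0 lets Netty pick its default worker thread count.
        EventLoopGroup boss = new NioEventLoopGroup(1, new DefaultThreadFactory("server-boss", true));
        EventLoopGroup worker = new NioEventLoopGroup(0, new DefaultThreadFactory("server-worker", true));
        try {
            // ... bind a ServerBootstrap with the boss and worker groups here ...
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}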

Example 39 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project incubator-pulsar by apache.

From the class PerformanceClient, method runPerformanceTest.

public void runPerformanceTest(long messages, long limit, int numOfTopic, int sizeOfMessage, String baseUrl, String topicName, String authPluginClassName, String authParams) throws InterruptedException, FileNotFoundException {
    ExecutorService executor = Executors.newCachedThreadPool(new DefaultThreadFactory("pulsar-perf-producer-exec"));
    HashMap<String, Tuple> producersMap = new HashMap<>();
    String produceBaseEndPoint = baseUrl + "ws/producer" + topicName;
    for (int i = 0; i < numOfTopic; i++) {
        String topic = numOfTopic > 1 ? produceBaseEndPoint + String.valueOf(i) : produceBaseEndPoint;
        URI produceUri = URI.create(topic);
        WebSocketClient produceClient = new WebSocketClient(new SslContextFactory(true));
        ClientUpgradeRequest produceRequest = new ClientUpgradeRequest();
        if (StringUtils.isNotBlank(authPluginClassName) && StringUtils.isNotBlank(authParams)) {
            try {
                Authentication auth = AuthenticationFactory.create(authPluginClassName, authParams);
                auth.start();
                AuthenticationDataProvider authData = auth.getAuthData();
                if (authData.hasDataForHttp()) {
                    for (Map.Entry<String, String> kv : authData.getHttpHeaders()) {
                        produceRequest.setHeader(kv.getKey(), kv.getValue());
                    }
                }
            } catch (Exception e) {
                log.error("Authentication plugin error: " + e.getMessage());
            }
        }
        SimpleTestProducerSocket produceSocket = new SimpleTestProducerSocket();
        try {
            produceClient.start();
            produceClient.connect(produceSocket, produceUri, produceRequest);
        } catch (IOException e1) {
            log.error("Fail in connecting: [{}]", e1.getMessage());
            return;
        } catch (Exception e1) {
            log.error("Fail in starting client[{}]", e1.getMessage());
            return;
        }
        producersMap.put(produceUri.toString(), new Tuple(produceClient, produceRequest, produceSocket));
    }
    // Wait for the websocket connections to be established.
    TimeUnit.SECONDS.sleep(5);
    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(limit);
            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (String topic : producersMap.keySet()) {
                    if (messages > 0) {
                        if (totalSent >= messages) {
                            log.trace("------------------- DONE -----------------------");
                            Thread.sleep(10000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();
                    if (producersMap.get(topic).getSocket().getSession() == null) {
                        Thread.sleep(10000);
                        System.exit(0);
                    }
                    producersMap.get(topic).getSocket().sendMsg(String.valueOf(totalSent++), sizeOfMessage);
                    messagesSent.increment();
                    bytesSent.add(sizeOfMessage);
                }
            }
        } catch (Throwable t) {
            log.error("Producer loop terminated with error", t);
            System.exit(0);
        }
    });
    // Print report stats
    long oldTime = System.nanoTime();
    Histogram reportHistogram = null;
    String statsFileName = "perf-websocket-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to {} \n", statsFileName);
    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);
    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();
    while (true) {
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            break;
        }
        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;
        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;
        reportHistogram = SimpleTestProducerSocket.recorder.getIntervalHistogram(reportHistogram);
        log.info("Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} ms - 95pct: {} ms - 99pct: {} ms - 99.9pct: {} ms - 99.99pct: {} ms", throughputFormat.format(rate), throughputFormat.format(throughput), dec.format(reportHistogram.getMean() / 1000.0), dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0));
        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();
        oldTime = now;
    }
    TimeUnit.SECONDS.sleep(100);
    executor.shutdown();
}
Also used : Histogram(org.HdrHistogram.Histogram) AuthenticationDataProvider(org.apache.pulsar.client.api.AuthenticationDataProvider) HashMap(java.util.HashMap) URI(java.net.URI) DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) SslContextFactory(org.eclipse.jetty.util.ssl.SslContextFactory) HistogramLogWriter(org.HdrHistogram.HistogramLogWriter) PrintStream(java.io.PrintStream) IOException(java.io.IOException) WebSocketClient(org.eclipse.jetty.websocket.client.WebSocketClient) ParameterException(com.beust.jcommander.ParameterException) FileNotFoundException(java.io.FileNotFoundException) RateLimiter(com.google.common.util.concurrent.RateLimiter) Authentication(org.apache.pulsar.client.api.Authentication) FileOutputStream(java.io.FileOutputStream) ExecutorService(java.util.concurrent.ExecutorService) ClientUpgradeRequest(org.eclipse.jetty.websocket.client.ClientUpgradeRequest) Map(java.util.Map)
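
The reporting loop's arithmetic is easy to miss inside the long log line: the message rate is a LongAdder sum divided by the elapsed seconds, and the throughput converts bytes per second to Mbit/s via / 1024 / 1024 * 8. A self-contained sketch of the counters and that conversion; the synthetic workload and the one-second sample window are stand-ins:

import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;

public class RateReportSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor =
                Executors.newCachedThreadPool(new DefaultThreadFactory("perf-producer-exec"));
        LongAdder messagesSent = new LongAdder();
        LongAdder bytesSent = new LongAdder();
        executor.submit(() -> {
            for (int i = 0; i < 100_000; i++) {
                messagesSent.increment();
                bytesSent.add(1024); // pretend each message is 1 KiB
            }
        });
        long oldTime = System.nanoTime();
        Thread.sleep(1000); // sample window
        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;
        // Same arithmetic as the report loop above: msg/s and Mbit/s.
        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;
        System.out.printf("%.1f msg/s, %.2f Mbit/s%n", rate, throughput);
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}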

Example 40 with DefaultThreadFactory

Use of io.netty.util.concurrent.DefaultThreadFactory in project incubator-pulsar by apache.

From the class ZookeeperCacheTest, method testZkCallbackThreadStuck.

/**
 * Verifies that a blocking call made inside a zkCache callback will not introduce a deadlock,
 * because zkCache completes the future result on a different thread than the zookeeper-client
 * thread.
 *
 * @throws Exception
 */
@Test(timeOut = 2000)
void testZkCallbackThreadStuck() throws Exception {
    OrderedScheduler executor = OrderedScheduler.newSchedulerBuilder().build();
    ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2);
    ExecutorService zkExecutor = Executors.newSingleThreadExecutor(new DefaultThreadFactory("mockZk"));
    // Add readOpDelayMs so the main thread will not serve the zkCache-returned future,
    // letting the zkExecutor thread handle the callback-result processing instead.
    MockZooKeeper zkClient = MockZooKeeper.newInstance(zkExecutor, 100);
    ZooKeeperCache zkCacheService = new LocalZooKeeperCache(zkClient, executor, scheduledExecutor);
    ZooKeeperDataCache<String> zkCache = new ZooKeeperDataCache<String>(zkCacheService) {

        @Override
        public String deserialize(String key, byte[] content) throws Exception {
            return new String(content);
        }
    };
    String value = "test";
    String key = "/" + UUID.randomUUID().toString().substring(0, 8);
    String key1 = "/" + UUID.randomUUID().toString().substring(0, 8);
    String key2 = "/" + UUID.randomUUID().toString().substring(0, 8);
    zkClient.create(key, value.getBytes(), null, null);
    zkClient.create(key1, value.getBytes(), null, null);
    zkClient.create(key2, value.getBytes(), null, null);
    CountDownLatch latch = new CountDownLatch(1);
    zkCache.getAsync(key).thenAccept(val -> {
        try {
            zkCache.get(key1);
        } catch (Exception e) {
            fail("failed to get " + key2, e);
        }
        latch.countDown();
    });
    latch.await();
    executor.shutdown();
    zkExecutor.shutdown();
    scheduledExecutor.shutdown();
}
Also used : DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) MockZooKeeper(org.apache.zookeeper.MockZooKeeper) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ExecutorService(java.util.concurrent.ExecutorService) CountDownLatch(java.util.concurrent.CountDownLatch) OrderedScheduler(org.apache.bookkeeper.common.util.OrderedScheduler) Test(org.testng.annotations.Test)
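
The deadlock this test guards against disappears once callback processing is handed off to a thread other than the one completing the future, which is what the dedicated "mockZk" executor forces. A minimal sketch of the same handoff using a plain CompletableFuture; the executor name and the simulated I/O thread are illustrative:

import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CallbackHandoffSketch {
    public static void main(String[] args) {
        // Dedicated thread for callbacks, so a blocking call made inside a
        // callback never stalls the thread that completed the future.
        ExecutorService callbackExecutor =
                Executors.newSingleThreadExecutor(new DefaultThreadFactory("callback-pool"));
        CompletableFuture<String> result = new CompletableFuture<>();
        // Simulate an I/O thread completing the future.
        new Thread(() -> result.complete("value"), "io-thread").start();
        // thenAcceptAsync runs the callback on callbackExecutor rather than
        // on the completing (I/O) thread.
        result.thenAcceptAsync(
                v -> System.out.println(Thread.currentThread().getName() + " got " + v),
                callbackExecutor).join();
        callbackExecutor.shutdown();
    }
}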

Aggregations

DefaultThreadFactory (io.netty.util.concurrent.DefaultThreadFactory): 49
NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup): 24
EventLoopGroup (io.netty.channel.EventLoopGroup): 15
ThreadFactory (java.util.concurrent.ThreadFactory): 12
ChannelFuture (io.netty.channel.ChannelFuture): 9
ExecutorService (java.util.concurrent.ExecutorService): 8
Test (org.junit.jupiter.api.Test): 8
EpollEventLoopGroup (io.netty.channel.epoll.EpollEventLoopGroup): 7
IOException (java.io.IOException): 7
ServerBootstrap (io.netty.bootstrap.ServerBootstrap): 6
NioServerSocketChannel (io.netty.channel.socket.nio.NioServerSocketChannel): 6
UdtChannel (io.netty.channel.udt.UdtChannel): 6
LoggingHandler (io.netty.handler.logging.LoggingHandler): 6
CountDownLatch (java.util.concurrent.CountDownLatch): 6
RateLimiter (com.google.common.util.concurrent.RateLimiter): 5
Channel (io.netty.channel.Channel): 5
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 5
Timeout (org.junit.jupiter.api.Timeout): 5
Test (org.testng.annotations.Test): 5
ParameterException (com.beust.jcommander.ParameterException): 4