Use of io.netty.util.concurrent.DefaultThreadFactory in project netty by netty.
From class NettyBlockHoundIntegrationTest, method testSingleThreadEventExecutorTakeTask:
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testSingleThreadEventExecutorTakeTask() throws InterruptedException {
    SingleThreadEventExecutor executor =
            new SingleThreadEventExecutor(null, new DefaultThreadFactory("test"), true) {
                @Override
                protected void run() {
                    while (!confirmShutdown()) {
                        Runnable task = takeTask();
                        if (task != null) {
                            task.run();
                        }
                    }
                }
            };
    testEventExecutorTakeTask(executor);
}
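For orientation, DefaultThreadFactory itself is small: it stamps each thread it creates with a pool name plus sequence numbers (threads are named <poolName>-<poolId>-<threadId>) and can mark them as daemon threads. A minimal standalone sketch; the class name and pool name are illustrative, not taken from the test above.

import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ThreadFactory;

public final class DefaultThreadFactoryDemo {
    public static void main(String[] args) throws InterruptedException {
        // The second argument marks created threads as daemon threads.
        ThreadFactory factory = new DefaultThreadFactory("demo", true);
        Thread thread = factory.newThread(
                () -> System.out.println(Thread.currentThread().getName()));
        thread.start();
        thread.join(); // prints something like "demo-1-1"
    }
}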
Use of io.netty.util.concurrent.DefaultThreadFactory in project netty by netty.
From class ChannelOutboundBufferTest, method testWriteTaskRejected:
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testWriteTaskRejected() throws Exception {
    final SingleThreadEventExecutor executor = new SingleThreadEventExecutor(
            null, new DefaultThreadFactory("executorPool"), true, 1, RejectedExecutionHandlers.reject()) {
        @Override
        protected void run() {
            do {
                Runnable task = takeTask();
                if (task != null) {
                    task.run();
                    updateLastExecutionTime();
                }
            } while (!confirmShutdown());
        }

        @Override
        protected Queue<Runnable> newTaskQueue(int maxPendingTasks) {
            return super.newTaskQueue(1);
        }
    };
    final CountDownLatch handlerAddedLatch = new CountDownLatch(1);
    final CountDownLatch handlerRemovedLatch = new CountDownLatch(1);
    EmbeddedChannel ch = new EmbeddedChannel();
    ch.pipeline().addLast(executor, "handler", new ChannelOutboundHandlerAdapter() {
        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
            promise.setFailure(new AssertionError("Should not be called"));
        }

        @Override
        public void handlerAdded(ChannelHandlerContext ctx) {
            handlerAddedLatch.countDown();
        }

        @Override
        public void handlerRemoved(ChannelHandlerContext ctx) {
            handlerRemovedLatch.countDown();
        }
    });
    // Let's wait until we are sure the handler was added.
    handlerAddedLatch.await();
    final CountDownLatch executeLatch = new CountDownLatch(1);
    final CountDownLatch runLatch = new CountDownLatch(1);
    executor.execute(new Runnable() {
        @Override
        public void run() {
            try {
                runLatch.countDown();
                executeLatch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    });
    runLatch.await();
    executor.execute(new Runnable() {
        @Override
        public void run() {
            // Will not be executed, but ensures the pending count is 1.
        }
    });
    assertEquals(1, executor.pendingTasks());
    assertEquals(0, ch.unsafe().outboundBuffer().totalPendingWriteBytes());
    ByteBuf buffer = buffer(128).writeZero(128);
    ChannelFuture future = ch.write(buffer);
    ch.runPendingTasks();
    assertTrue(future.cause() instanceof RejectedExecutionException);
    assertEquals(0, buffer.refCnt());
    // In case of a rejected task we should not have anything pending.
    assertEquals(0, ch.unsafe().outboundBuffer().totalPendingWriteBytes());
    executeLatch.countDown();
    while (executor.pendingTasks() != 0) {
        // Wait until there is no pending task left.
        Thread.sleep(10);
    }
    ch.pipeline().remove("handler");
    // Ensure we do not try to shut down the executor before we have handled everything for the
    // Channel. Otherwise the executor may reject when the Channel tries to add a task to it.
    handlerRemovedLatch.await();
    safeClose(ch);
    executor.shutdownGracefully();
}
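The rejection path exercised here comes from RejectedExecutionHandlers.reject() combined with a one-slot task queue. A minimal sketch of the same behavior outside a test harness, assuming DefaultEventExecutor's (parent, threadFactory, maxPendingTasks, rejectedExecutionHandler) constructor; the class and pool names are illustrative:

import io.netty.util.concurrent.DefaultEventExecutor;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.concurrent.RejectedExecutionHandlers;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.RejectedExecutionException;

public final class RejectDemo {
    public static void main(String[] args) throws InterruptedException {
        // One pending-task slot plus reject(), mirroring the executor in the test above.
        DefaultEventExecutor executor = new DefaultEventExecutor(
                null, new DefaultThreadFactory("reject-demo"), 1, RejectedExecutionHandlers.reject());
        CountDownLatch started = new CountDownLatch(1);
        CountDownLatch blocker = new CountDownLatch(1);
        executor.execute(() -> {
            started.countDown();
            try {
                blocker.await(); // keep the single worker thread busy
            } catch (InterruptedException ignored) {
                // fall through and finish
            }
        });
        started.await();             // the worker is now blocked inside the first task
        executor.execute(() -> { }); // fills the only queue slot
        try {
            executor.execute(() -> { }); // queue full: the handler throws
        } catch (RejectedExecutionException expected) {
            System.out.println("task rejected, as configured");
        }
        blocker.countDown();
        executor.shutdownGracefully();
    }
}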
Use of io.netty.util.concurrent.DefaultThreadFactory in project netty by netty.
From class DnsNativeClient, method main:
public static void main(String[] args) throws Exception {
    NioEventLoopGroup group = new NioEventLoopGroup(1, new DefaultThreadFactory("netty"));
    DnsAddressResolverGroup resolverGroup = new DnsAddressResolverGroup(
            NioDatagramChannel.class, DnsServerAddressStreamProviders.platformDefault());
    AddressResolver<InetSocketAddress> resolver = resolverGroup.getResolver(group.next());
    System.out.println(resolver);
    resolver.close();
    group.shutdownGracefully().get();
}
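This snippet only creates and closes the resolver. To actually resolve a name with it, AddressResolver exposes resolve(SocketAddress). A sketch extending the same setup; the host "netty.io" and port are just example values:

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.resolver.AddressResolver;
import io.netty.resolver.dns.DnsAddressResolverGroup;
import io.netty.resolver.dns.DnsServerAddressStreamProviders;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.concurrent.Future;
import java.net.InetSocketAddress;

public final class DnsResolveDemo {
    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup(1, new DefaultThreadFactory("netty"));
        DnsAddressResolverGroup resolverGroup = new DnsAddressResolverGroup(
                NioDatagramChannel.class, DnsServerAddressStreamProviders.platformDefault());
        AddressResolver<InetSocketAddress> resolver = resolverGroup.getResolver(group.next());
        // Resolve an unresolved address asynchronously; sync() waits for completion.
        Future<InetSocketAddress> future =
                resolver.resolve(InetSocketAddress.createUnresolved("netty.io", 443));
        System.out.println(future.sync().getNow());
        resolver.close();
        group.shutdownGracefully().get();
    }
}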
Use of io.netty.util.concurrent.DefaultThreadFactory in project grpc-java by grpc.
From class AbstractBenchmark, method setup:
/**
 * Initialize the environment for the executor.
 */
public void setup(ExecutorType clientExecutor,
                  ExecutorType serverExecutor,
                  MessageSize requestSize,
                  MessageSize responseSize,
                  FlowWindowSize windowSize,
                  ChannelType channelType,
                  int maxConcurrentStreams,
                  int channelCount) throws Exception {
    ServerCredentials serverCreds = InsecureServerCredentials.create();
    NettyServerBuilder serverBuilder;
    NettyChannelBuilder channelBuilder;
    if (channelType == ChannelType.LOCAL) {
        LocalAddress address = new LocalAddress("netty-e2e-benchmark");
        serverBuilder = NettyServerBuilder.forAddress(address, serverCreds);
        serverBuilder.channelType(LocalServerChannel.class);
        channelBuilder = NettyChannelBuilder.forAddress(address);
        channelBuilder.channelType(LocalChannel.class);
    } else {
        // Pick a port using an ephemeral socket.
        ServerSocket sock = new ServerSocket();
        sock.bind(new InetSocketAddress(BENCHMARK_ADDR, 0));
        SocketAddress address = sock.getLocalSocketAddress();
        sock.close();
        serverBuilder = NettyServerBuilder.forAddress(address, serverCreds)
                .channelType(NioServerSocketChannel.class);
        channelBuilder = NettyChannelBuilder.forAddress(address)
                .channelType(NioSocketChannel.class);
    }
    if (serverExecutor == ExecutorType.DIRECT) {
        serverBuilder.directExecutor();
    }
    if (clientExecutor == ExecutorType.DIRECT) {
        channelBuilder.directExecutor();
    }
    // Always use a different worker group from the client.
    ThreadFactory serverThreadFactory = new DefaultThreadFactory("STF pool", true);
    serverBuilder.workerEventLoopGroup(new NioEventLoopGroup(0, serverThreadFactory));
    serverBuilder.bossEventLoopGroup(new NioEventLoopGroup(1, serverThreadFactory));
    // Always set the connection and stream window sizes to the same value.
    serverBuilder.flowControlWindow(windowSize.bytes());
    channelBuilder.flowControlWindow(windowSize.bytes());
    channelBuilder.negotiationType(NegotiationType.PLAINTEXT);
    serverBuilder.maxConcurrentCallsPerConnection(maxConcurrentStreams);
    // Create buffers of the desired size for requests and responses.
    PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
    // Use a heap buffer for now, since MessageFramer doesn't know how to directly convert this
    // into a WritableBuffer.
    // TODO(carl-mastrangelo): convert this into a regular buffer() call. See
    // https://github.com/grpc/grpc-java/issues/2062#issuecomment-234646216
    request = alloc.heapBuffer(requestSize.bytes());
    request.writerIndex(request.capacity() - 1);
    response = alloc.heapBuffer(responseSize.bytes());
    response.writerIndex(response.capacity() - 1);
    // A simple method that sends and receives a Netty ByteBuf.
    unaryMethod = MethodDescriptor.<ByteBuf, ByteBuf>newBuilder()
            .setType(MethodType.UNARY)
            .setFullMethodName("benchmark/unary")
            .setRequestMarshaller(new ByteBufOutputMarshaller())
            .setResponseMarshaller(new ByteBufOutputMarshaller())
            .build();
    pingPongMethod = unaryMethod.toBuilder()
            .setType(MethodType.BIDI_STREAMING)
            .setFullMethodName("benchmark/pingPong")
            .build();
    flowControlledStreaming = pingPongMethod.toBuilder()
            .setFullMethodName("benchmark/flowControlledStreaming")
            .build();
    // Server implementation of the unary and streaming methods.
    serverBuilder.addService(ServerServiceDefinition.builder(
            new ServiceDescriptor("benchmark", unaryMethod, pingPongMethod, flowControlledStreaming))
            .addMethod(unaryMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
                @Override
                public ServerCall.Listener<ByteBuf> startCall(
                        final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
                    call.sendHeaders(new Metadata());
                    call.request(1);
                    return new ServerCall.Listener<ByteBuf>() {
                        @Override
                        public void onMessage(ByteBuf message) {
                            // Discard the request and echo the preallocated response.
                            message.release();
                            call.sendMessage(response.slice());
                        }

                        @Override
                        public void onHalfClose() {
                            call.close(Status.OK, new Metadata());
                        }

                        @Override
                        public void onCancel() {
                        }

                        @Override
                        public void onComplete() {
                        }
                    };
                }
            })
            .addMethod(pingPongMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
                @Override
                public ServerCall.Listener<ByteBuf> startCall(
                        final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
                    call.sendHeaders(new Metadata());
                    call.request(1);
                    return new ServerCall.Listener<ByteBuf>() {
                        @Override
                        public void onMessage(ByteBuf message) {
                            message.release();
                            call.sendMessage(response.slice());
                            // Request the next message.
                            call.request(1);
                        }

                        @Override
                        public void onHalfClose() {
                            call.close(Status.OK, new Metadata());
                        }

                        @Override
                        public void onCancel() {
                        }

                        @Override
                        public void onComplete() {
                        }
                    };
                }
            })
            .addMethod(flowControlledStreaming, new ServerCallHandler<ByteBuf, ByteBuf>() {
                @Override
                public ServerCall.Listener<ByteBuf> startCall(
                        final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
                    call.sendHeaders(new Metadata());
                    call.request(1);
                    return new ServerCall.Listener<ByteBuf>() {
                        @Override
                        public void onMessage(ByteBuf message) {
                            message.release();
                            while (call.isReady()) {
                                call.sendMessage(response.slice());
                            }
                            // Request the next message.
                            call.request(1);
                        }

                        @Override
                        public void onHalfClose() {
                            call.close(Status.OK, new Metadata());
                        }

                        @Override
                        public void onCancel() {
                        }

                        @Override
                        public void onComplete() {
                        }

                        @Override
                        public void onReady() {
                            while (call.isReady()) {
                                call.sendMessage(response.slice());
                            }
                        }
                    };
                }
            })
            .build());
    // Build and start the clients and servers.
    server = serverBuilder.build();
    server.start();
    channels = new ManagedChannel[channelCount];
    ThreadFactory clientThreadFactory = new DefaultThreadFactory("CTF pool", true);
    for (int i = 0; i < channelCount; i++) {
        // Use a dedicated event loop for each channel.
        channels[i] = channelBuilder
                .eventLoopGroup(new NioEventLoopGroup(1, clientThreadFactory))
                .build();
    }
}
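setup() leaves the server, the channels, and the pooled request/response buffers live, so the benchmark class also needs a matching cleanup. A minimal sketch of what that could look like; the teardown method itself is an assumption and not part of the snippet above, though the fields it touches (server, channels, request, response) all come from setup():

// Hypothetical companion to setup(): stop clients and server, release buffers.
public void teardown() throws Exception {
    for (ManagedChannel channel : channels) {
        channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
    }
    server.shutdown();
    server.awaitTermination();
    // Release the preallocated request/response ByteBufs back to the pool.
    request.release();
    response.release();
}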