Use of io.netty.util.concurrent.DefaultEventExecutor in project moleculer-java by moleculer-java.
The class RedisPubSubClient, method connect.
// --- CONNECT ---
public final void connect() {
    DefaultClientResources.Builder builder = DefaultClientResources.builder();
    acceptor = Executors.newSingleThreadExecutor();
    group = new NioEventLoopGroup(1, acceptor);

    // Hand Lettuce a provider that always returns the shared event loop group.
    // Its lifecycle is owned by this client, so shutdown/release are no-ops here.
    builder.eventLoopGroupProvider(new EventLoopGroupProvider() {

        @Override
        public final int threadPoolSize() {
            return 1;
        }

        @Override
        public final Future<Boolean> shutdown(long quietPeriod, long timeout, TimeUnit timeUnit) {
            return null;
        }

        @Override
        public final Future<Boolean> release(EventExecutorGroup eventLoopGroup, long quietPeriod, long timeout,
                TimeUnit unit) {
            return null;
        }

        @SuppressWarnings("unchecked")
        @Override
        public final <T extends EventLoopGroup> T allocate(Class<T> type) {
            return (T) group;
        }
    });
    builder.eventExecutorGroup(new DefaultEventExecutor(executor));
    if (eventBus == null) {

        // No external event bus was supplied; install a no-op bus
        // for Lettuce's internal events.
        builder.eventBus(new EventBus() {

            @Override
            public final void publish(Event event) {
            }

            @Override
            public final Observable<Event> get() {
                return null;
            }
        });
    } else {
        builder.eventBus(eventBus);
    }
    resources = builder.build();
    List<RedisURI> redisURIs = parseURLs(urls, password, secure);
    StatefulRedisPubSubConnection<byte[], byte[]> connection;
    ByteArrayCodec codec = new ByteArrayCodec();
    if (urls.length > 1) {
        // Clustered client
        connection = RedisClusterClient.create(resources, redisURIs).connectPubSub(codec);
    } else {
        // Single connection
        connection = RedisClient.create(resources, redisURIs.get(0)).connectPubSub(codec);
    }
    // Add listener
    if (listener != null) {
        connection.addListener(listener);
    }
    commands = connection.async();
}
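The provider above deliberately leaves shutdown and release as no-ops because the event loop group outlives any single Lettuce client. A minimal sketch of the matching teardown, assuming the commands, resources, group and acceptor fields shown above; the project's actual disconnect logic may differ:

public final void disconnect() {
    if (commands != null) {
        commands.getStatefulConnection().close(); // close the pub/sub connection first
        commands = null;
    }
    if (resources != null) {
        resources.shutdown();                     // release the Lettuce client resources
        resources = null;
    }
    if (group != null) {
        group.shutdownGracefully();               // stop the shared NIO event loop
        group = null;
    }
    if (acceptor != null) {
        acceptor.shutdownNow();                   // stop the single-thread executor backing it
        acceptor = null;
    }
}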
Use of io.netty.util.concurrent.DefaultEventExecutor in project netty by netty.
The class BurstCostExecutorsBenchmark, method setup.
@Setup
public void setup() {
    ExecutorType type = ExecutorType.valueOf(executorType);
    switch (type) {
        case spinning:
            // The case with 3 producers can have a peak of 3*burstLength offers:
            // 4 is to leave some room between the offers and 1024 is to leave some room
            // between producer/consumer when work is > 0 and 1 producer.
            // If work = 0 then the task queue is supposed to be near empty most of the time.
            executor = new SpinExecutorService(Math.min(1024, burstLength * 4));
            executorToShutdown = executor;
            break;
        case defaultEventExecutor:
            executor = new DefaultEventExecutor();
            executorToShutdown = executor;
            break;
        case juc:
            executor = Executors.newSingleThreadScheduledExecutor();
            executorToShutdown = executor;
            break;
        case nioEventLoop:
            NioEventLoopGroup nioEventLoopGroup = new NioEventLoopGroup(1);
            nioEventLoopGroup.setIoRatio(1);
            executor = nioEventLoopGroup.next();
            executorToShutdown = nioEventLoopGroup;
            break;
        case epollEventLoop:
            Epoll.ensureAvailability();
            EpollEventLoopGroup epollEventLoopGroup = new EpollEventLoopGroup(1);
            epollEventLoopGroup.setIoRatio(1);
            executor = epollEventLoopGroup.next();
            executorToShutdown = epollEventLoopGroup;
            break;
        case kqueueEventLoop:
            KQueue.ensureAvailability();
            KQueueEventLoopGroup kQueueEventLoopGroup = new KQueueEventLoopGroup(1);
            kQueueEventLoopGroup.setIoRatio(1);
            executor = kQueueEventLoopGroup.next();
            executorToShutdown = kQueueEventLoopGroup;
            break;
    }
}
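The setup assigns both executor (the benchmark target) and executorToShutdown (the handle that owns the threads), so whichever group was created can be disposed after the trial. A minimal sketch of such a teardown, assuming executorToShutdown is an ExecutorService as above; the actual benchmark's teardown may differ:

@TearDown
public void tearDown() throws InterruptedException {
    executorToShutdown.shutdown();
    // Netty event loop groups observe a graceful quiet period, so allow
    // some time for termination before the trial ends.
    executorToShutdown.awaitTermination(10, TimeUnit.SECONDS);
}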
Use of io.netty.util.concurrent.DefaultEventExecutor in project reactor-netty by reactor.
The class HttpServerTests, method testGracefulShutdown.
@Test
void testGracefulShutdown() throws Exception {
    CountDownLatch latch1 = new CountDownLatch(2);
    CountDownLatch latch2 = new CountDownLatch(2);
    CountDownLatch latch3 = new CountDownLatch(1);
    LoopResources loop = LoopResources.create("testGracefulShutdown");
    disposableServer = createServer()
            .runOn(loop)
            .doOnConnection(c -> {
                c.onDispose().subscribe(null, null, latch2::countDown);
                latch1.countDown();
            })
            // Track every accepted channel so that disposal can await them
            .channelGroup(new DefaultChannelGroup(new DefaultEventExecutor()))
            .route(r -> r.get("/delay500", (req, res) ->
                            res.sendString(Mono.just("delay500").delayElement(Duration.ofMillis(500))))
                         .get("/delay1000", (req, res) ->
                            res.sendString(Mono.just("delay1000").delayElement(Duration.ofSeconds(1)))))
            .bindNow(Duration.ofSeconds(30));

    HttpClient client = createClient(disposableServer::address);

    AtomicReference<String> result = new AtomicReference<>();
    Flux.just("/delay500", "/delay1000")
        .flatMap(s -> client.get().uri(s).responseContent().aggregate().asString())
        .collect(Collectors.joining())
        .subscribe(s -> {
            result.set(s);
            latch3.countDown();
        });

    assertThat(latch1.await(30, TimeUnit.SECONDS)).isTrue();

    // Stop accepting incoming requests, wait at most 3s for the active requests to finish
    disposableServer.disposeNow();

    assertThat(latch2.await(30, TimeUnit.SECONDS)).isTrue();

    // Dispose the event loop
    loop.disposeLater().block(Duration.ofSeconds(30));

    assertThat(latch3.await(30, TimeUnit.SECONDS)).isTrue();
    assertThat(result.get()).isNotNull().isEqualTo("delay500delay1000");
}
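The channelGroup hook is what enables the graceful part: Reactor Netty registers every accepted channel in the supplied group, so disposal can wait for in-flight requests instead of dropping them. A standalone sketch of the underlying Netty primitive, with illustrative variable names:

EventExecutor executor = new DefaultEventExecutor();
ChannelGroup activeChannels = new DefaultChannelGroup(executor);
// Channels are added to the group as they are accepted (Reactor Netty
// does this internally); closing the group then closes every tracked
// connection in a single call.
activeChannels.close().awaitUninterruptibly();
executor.shutdownGracefully();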
Use of io.netty.util.concurrent.DefaultEventExecutor in project reactor-netty by reactor.
The class TcpServerTests, method testGracefulShutdown.
@Test
void testGracefulShutdown() throws Exception {
    CountDownLatch latch1 = new CountDownLatch(2);
    CountDownLatch latch2 = new CountDownLatch(2);
    CountDownLatch latch3 = new CountDownLatch(1);
    LoopResources loop = LoopResources.create("testGracefulShutdown");
    DisposableServer disposableServer = TcpServer.create()
            .port(0)
            .runOn(loop)
            .doOnConnection(c -> {
                c.onDispose().subscribe(null, null, latch2::countDown);
                latch1.countDown();
            })
            // Track every accepted channel so that disposal can await them
            .channelGroup(new DefaultChannelGroup(new DefaultEventExecutor()))
            .handle((in, out) -> out.sendString(Mono.just("delay1000").delayElement(Duration.ofSeconds(1))))
            .wiretap(true)
            .bindNow(Duration.ofSeconds(30));

    TcpClient client = TcpClient.create()
            .remoteAddress(disposableServer::address)
            .wiretap(true);

    AtomicReference<String> result = new AtomicReference<>();
    Flux.merge(client.connect(), client.connect())
        .flatMap(conn -> conn.inbound().receive().asString())
        .collect(Collectors.joining())
        .subscribe(s -> {
            result.set(s);
            latch3.countDown();
        });

    assertThat(latch1.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();

    // Stop accepting incoming requests, wait at most 3s for the active requests to finish
    disposableServer.disposeNow();

    // Dispose the event loop
    loop.disposeLater().block(Duration.ofSeconds(30));

    assertThat(latch2.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();
    assertThat(latch3.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();
    assertThat(result.get()).isNotNull().isEqualTo("delay1000delay1000");
}
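The blocking disposeNow/disposeLater pair can also be sequenced without intermediate blocking. A hedged variant, assuming the disposableServer and loop variables above:

disposableServer.dispose();                     // trigger graceful shutdown asynchronously
disposableServer.onDispose()                    // completes once the server is fully closed
                .then(loop.disposeLater())      // then release the event loop resources
                .block(Duration.ofSeconds(30)); // block only at the test boundary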
Use of io.netty.util.concurrent.DefaultEventExecutor in project moleculer-java by moleculer-java.
The class RedisGetSetClient, method connect.
// --- CONNECT ---
public final void connect() {
    DefaultClientResources.Builder builder = DefaultClientResources.builder();
    acceptor = Executors.newSingleThreadExecutor();
    group = new NioEventLoopGroup(1, acceptor);
    builder.eventLoopGroupProvider(new EventLoopGroupProvider() {

        @Override
        public final int threadPoolSize() {
            return 1;
        }

        @Override
        public final Future<Boolean> shutdown(long quietPeriod, long timeout, TimeUnit timeUnit) {
            return null;
        }

        @Override
        public final Future<Boolean> release(EventExecutorGroup eventLoopGroup, long quietPeriod, long timeout,
                TimeUnit unit) {
            return null;
        }

        @SuppressWarnings("unchecked")
        @Override
        public final <T extends EventLoopGroup> T allocate(Class<T> type) {
            return (T) group;
        }
    });
    builder.eventExecutorGroup(new DefaultEventExecutor(executor));
    if (eventBus == null) {
        builder.eventBus(new EventBus() {

            @Override
            public final void publish(Event event) {
            }

            @Override
            public final Observable<Event> get() {
                return null;
            }
        });
    } else {
        builder.eventBus(eventBus);
    }
    resources = builder.build();
    List<RedisURI> redisURIs = parseURLs(urls, password, secure);
    ByteArrayCodec codec = new ByteArrayCodec();
    if (urls.length > 1) {
        // Clustered client
        clusteredClient = RedisClusterClient.create(resources, redisURIs).connect(codec).async();
    } else {
        // Single server connection
        client = RedisClient.create(resources, redisURIs.get(0)).connect(codec).async();
    }
}
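For context, a minimal usage sketch of the resulting async commands, assuming client holds the single-server async commands created above; Lettuce's RedisFuture implements CompletionStage, so the calls compose. The key and value here are illustrative:

byte[] key = "greeting".getBytes(StandardCharsets.UTF_8);
byte[] value = "hello".getBytes(StandardCharsets.UTF_8);
client.set(key, value)                    // async SET, completes with "OK"
      .thenCompose(ok -> client.get(key)) // chain an async GET after the SET
      .thenAccept(bytes -> System.out.println(new String(bytes, StandardCharsets.UTF_8)));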