use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project flink by apache.
the class ClientTest method testConcurrentQueries.
/**
* Multiple threads concurrently fire queries.
*/
@Test
public void testConcurrentQueries() throws Exception {
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());
    ExecutorService executor = null;
    Client<KvStateInternalRequest, KvStateResponse> client = null;
    Channel serverChannel = null;
    final byte[] serializedResult = new byte[1024];
    ThreadLocalRandom.current().nextBytes(serializedResult);
    try {
        int numQueryTasks = 4;
        final int numQueriesPerTask = 1024;
        executor = Executors.newFixedThreadPool(numQueryTasks);
        client = new Client<>("Test Client", 1, serializer, stats);
        serverChannel = createServerChannel(new RespondingChannelHandler(serializer, serializedResult));
        final InetSocketAddress serverAddress = getKvStateServerAddress(serverChannel);
        final Client<KvStateInternalRequest, KvStateResponse> finalClient = client;
        Callable<List<CompletableFuture<KvStateResponse>>> queryTask = () -> {
            List<CompletableFuture<KvStateResponse>> results = new ArrayList<>(numQueriesPerTask);
            for (int i = 0; i < numQueriesPerTask; i++) {
                KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]);
                results.add(finalClient.sendRequest(serverAddress, request));
            }
            return results;
        };
        // Submit query tasks
        List<Future<List<CompletableFuture<KvStateResponse>>>> futures = new ArrayList<>();
        for (int i = 0; i < numQueryTasks; i++) {
            futures.add(executor.submit(queryTask));
        }
        // Verify results
        for (Future<List<CompletableFuture<KvStateResponse>>> future : futures) {
            List<CompletableFuture<KvStateResponse>> results = future.get();
            for (CompletableFuture<KvStateResponse> result : results) {
                KvStateResponse actual = result.get();
                assertArrayEquals(serializedResult, actual.getContent());
            }
        }
        int totalQueries = numQueryTasks * numQueriesPerTask;
        // Counts can take some time to propagate
        while (stats.getNumSuccessful() != totalQueries) {
            Thread.sleep(100L);
        }
        assertEquals(totalQueries, stats.getNumRequests());
        assertEquals(totalQueries, stats.getNumSuccessful());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        if (client != null) {
            try {
                client.shutdown().get();
            } catch (Exception e) {
                e.printStackTrace();
            }
            assertTrue(client.isEventGroupShutdown());
        }
        assertEquals("Channel leak", 0L, stats.getNumConnections());
    }
}
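A note on the busy-wait near the end of the test: if the stats never converge, the loop spins until the test harness kills it. Below is a minimal, self-contained sketch of a bounded alternative; the awaitCondition helper is hypothetical, not part of Flink's test utilities.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

final class TestConditions {

    // Polls the condition every 50 ms until it holds or the deadline passes.
    static void awaitCondition(BooleanSupplier condition, long timeoutMillis)
            throws InterruptedException, TimeoutException {
        final long deadline = System.nanoTime() + timeoutMillis * 1_000_000L;
        while (!condition.getAsBoolean()) {
            if (System.nanoTime() >= deadline) {
                throw new TimeoutException("Condition not met within " + timeoutMillis + " ms");
            }
            Thread.sleep(50L);
        }
    }
}

With such a helper, the unbounded loop in the test would become awaitCondition(() -> stats.getNumSuccessful() == totalQueries, 30_000L); failing fast with a TimeoutException instead of hanging.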
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project flink by apache.
the class ClientTest method testFailureClosesChannel.
/**
* Tests that a server failure closes the connection and removes it from the established
* connections.
*/
@Test
public void testFailureClosesChannel() throws Exception {
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());
    Client<KvStateInternalRequest, KvStateResponse> client = null;
    Channel serverChannel = null;
    try {
        client = new Client<>("Test Client", 1, serializer, stats);
        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();
        serverChannel = createServerChannel(new ChannelDataCollectingHandler(channel, received));
        InetSocketAddress serverAddress = getKvStateServerAddress(serverChannel);
        // Requests
        List<Future<KvStateResponse>> futures = new ArrayList<>();
        KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]);
        futures.add(client.sendRequest(serverAddress, request));
        futures.add(client.sendRequest(serverAddress, request));
        // Poll with a timeout so the "Receive timed out" assertion can actually fire;
        // LinkedBlockingQueue.take() blocks forever and never returns null.
        ByteBuf buf = received.poll(30L, TimeUnit.SECONDS);
        assertNotNull("Receive timed out", buf);
        buf.release();
        buf = received.poll(30L, TimeUnit.SECONDS);
        assertNotNull("Receive timed out", buf);
        buf.release();
        assertEquals(1L, stats.getNumConnections());
        Channel ch = channel.get();
        assertNotNull("Channel not active", ch);
        // Respond with failure
        ch.writeAndFlush(MessageSerializer.serializeServerFailure(
                serverChannel.alloc(), new RuntimeException("Expected test server failure")));
        try {
            futures.remove(0).get();
            fail("Did not throw expected server failure");
        } catch (ExecutionException e) {
            // Expected
            if (!(e.getCause() instanceof RuntimeException)) {
                fail("Did not throw expected Exception");
            }
        }
        try {
            futures.remove(0).get();
            fail("Did not throw expected server failure");
        } catch (ExecutionException e) {
            // Expected
            if (!(e.getCause() instanceof RuntimeException)) {
                fail("Did not throw expected Exception");
            }
        }
        assertEquals(0L, stats.getNumConnections());
        // Counts can take some time to propagate
        while (stats.getNumSuccessful() != 0L || stats.getNumFailed() != 2L) {
            Thread.sleep(100L);
        }
        assertEquals(2L, stats.getNumRequests());
        assertEquals(0L, stats.getNumSuccessful());
        assertEquals(2L, stats.getNumFailed());
    } finally {
        if (client != null) {
            try {
                client.shutdown().get();
            } catch (Exception e) {
                e.printStackTrace();
            }
            assertTrue(client.isEventGroupShutdown());
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        assertEquals("Channel leak", 0L, stats.getNumConnections());
    }
}
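The two identical try/catch blocks in the test could be folded into one assertion helper. A hedged sketch follows; assertFailsWith is a hypothetical helper, not an existing Flink or JUnit utility.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

final class FailureAssertions {

    // Awaits the future and asserts that it completed exceptionally
    // with the expected cause type.
    static void assertFailsWith(Future<?> future, Class<? extends Throwable> expectedCause)
            throws InterruptedException {
        try {
            future.get();
            fail("Did not throw expected server failure");
        } catch (ExecutionException e) {
            assertTrue("Unexpected cause: " + e.getCause(),
                    expectedCause.isInstance(e.getCause()));
        }
    }
}

Each duplicated block in the test then collapses to assertFailsWith(futures.remove(0), RuntimeException.class);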
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project flink by apache.
the class AbstractServerBase method attemptToBind.
/**
 * Tries to start the server at the provided port.
 *
 * <p>This, in conjunction with {@link #start()}, tries to start the server on a free port
 * among the port range provided at the constructor.
 *
 * @param port the port to try to bind the server to.
 * @return {@code true} if the bind succeeded, {@code false} if the port was already in use.
 * @throws Throwable If something other than a port conflict goes wrong during the bind
 *     operation.
 */
private boolean attemptToBind(final int port) throws Throwable {
    log.debug("Attempting to start {} on port {}.", serverName, port);
    this.queryExecutor = createQueryExecutor();
    this.handler = initializeHandler();
    final NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads);
    final ThreadFactory threadFactory =
            new ThreadFactoryBuilder()
                    .setDaemon(true)
                    .setNameFormat("Flink " + serverName + " EventLoop Thread %d")
                    .build();
    final NioEventLoopGroup nioGroup = new NioEventLoopGroup(numEventLoopThreads, threadFactory);
    this.bootstrap =
            new ServerBootstrap()
                    .localAddress(bindAddress, port)
                    .group(nioGroup)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.ALLOCATOR, bufferPool)
                    .childOption(ChannelOption.ALLOCATOR, bufferPool)
                    .childHandler(new ServerChannelInitializer<>(handler));
    // Netty validates each water mark against the current value of the other one, so the
    // order of the two childOption() calls matters. The default high water mark comes
    // from DefaultChannelConfig (not exposed).
    final int defaultHighWaterMark = 64 * 1024;
    // (ignore the warning here to keep this flexible in case the configuration values change)
    if (LOW_WATER_MARK > defaultHighWaterMark) {
        // The new low water mark exceeds the default high water mark: raise the high mark first.
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
    } else {
        // Covers all other cases, including (newHighWaterMark < defaultLowWaterMark).
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
    }
    try {
        final ChannelFuture future = bootstrap.bind().sync();
        if (future.isSuccess()) {
            final InetSocketAddress localAddress =
                    (InetSocketAddress) future.channel().localAddress();
            serverAddress = new InetSocketAddress(localAddress.getAddress(), localAddress.getPort());
            return true;
        }
        throw future.cause();
    } catch (BindException e) {
        log.debug("Failed to start {} on port {}: {}.", serverName, port, e.getMessage());
        try {
            // We shut down the server, but reset the shutdown future every time, because in
            // case of a failure to bind, attemptToBind() will be called again, and not
            // resetting the flag would interfere with future shutdown attempts.
            shutdownServer()
                    .whenComplete((ignoredV, ignoredT) -> serverShutdownFuture.getAndSet(null))
                    .get();
        } catch (Exception r) {
            // Here we were seeing this problem:
            // https://github.com/netty/netty/issues/4357 if we do a get().
            // This is why we now simply wait a bit so that everything is shut down.
            log.warn("Problem while shutting down {}: {}", serverName, r.getMessage());
        }
    }
    // Any other type of exception bubbles up to the caller.
    return false;
}
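For context, the start() method referenced in the javadoc drives attemptToBind() over a port range. A rough sketch of what that driver loop might look like follows; the bindPortRange iterator and the FlinkRuntimeException at the end are assumptions based on this snippet, not verified against the actual AbstractServerBase source.

// Hedged sketch of the driver loop: walk the configured port range and stop
// at the first port that attemptToBind() accepts.
public void start() throws Throwable {
    // bindPortRange is assumed to be an Iterator<Integer> supplied at construction.
    while (bindPortRange.hasNext()) {
        final int port = bindPortRange.next();
        if (attemptToBind(port)) {
            log.info("Started {} on port {}.", serverName, port);
            return;
        }
    }
    throw new FlinkRuntimeException(
            "Unable to start " + serverName + ": no port in the configured range was free.");
}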
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project flink by apache.
the class PartitionRequestClientFactoryTest method testNettyClientConnectRetryMultipleThread.
@Test
public void testNettyClientConnectRetryMultipleThread() throws Exception {
    NettyTestUtil.NettyServerAndClient serverAndClient = createNettyServerAndClient();
    UnstableNettyClient unstableNettyClient = new UnstableNettyClient(serverAndClient.client(), 2);
    PartitionRequestClientFactory factory =
            new PartitionRequestClientFactory(unstableNettyClient, 2, 1, connectionReuseEnabled);
    ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(10);
    List<Future<NettyPartitionRequestClient>> futures = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        Future<NettyPartitionRequestClient> future = threadPoolExecutor.submit(() -> {
            NettyPartitionRequestClient client = null;
            try {
                client = factory.createPartitionRequestClient(serverAndClient.getConnectionID(0));
            } catch (Exception e) {
                fail(e.getMessage());
            }
            return client;
        });
        futures.add(future);
    }
    futures.forEach(future -> {
        NettyPartitionRequestClient client;
        try {
            client = future.get();
            assertNotNull(client);
        } catch (Exception e) {
            // Fail with the cause message instead of printing it and failing silently.
            fail(e.getMessage());
        }
    });
    threadPoolExecutor.shutdown();
    serverAndClient.client().shutdown();
    serverAndClient.server().shutdown();
}
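One subtlety in the teardown above: ExecutorService.shutdown() returns immediately and does not wait for submitted tasks to finish. Where a test needs the pool fully drained before asserting or returning, a stricter variant might look like the following sketch; the shutdownAndAwait helper is hypothetical.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class ExecutorShutdown {

    // shutdown() only stops accepting new tasks; awaitTermination() actually
    // blocks until in-flight tasks complete (or the timeout expires).
    static void shutdownAndAwait(ExecutorService executor) throws InterruptedException {
        executor.shutdown();
        if (!executor.awaitTermination(30L, TimeUnit.SECONDS)) {
            // Best effort: interrupt any stragglers that ignored the deadline.
            executor.shutdownNow();
        }
    }
}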
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project netty by netty.
the class DefaultHttp2ConnectionTest method removeAllStreamsWhileIteratingActiveStreamsAndExceptionOccurs.
@Test
public void removeAllStreamsWhileIteratingActiveStreamsAndExceptionOccurs() throws Exception {
    final Endpoint<Http2RemoteFlowController> remote = client.remote();
    final Endpoint<Http2LocalFlowController> local = client.local();
    for (int c = 3, s = 2; c < 5000; c += 2, s += 2) {
        local.createStream(c, false);
        remote.createStream(s, false);
    }
    final Promise<Void> promise = group.next().newPromise();
    final CountDownLatch latch = new CountDownLatch(1);
    try {
        client.forEachActiveStream(new Http2StreamVisitor() {
            @Override
            public boolean visit(Http2Stream stream) throws Http2Exception {
                // This close call is basically a noop, because the following statement
                // will throw an exception.
                client.close(promise);
                // Do an invalid operation while iterating.
                remote.createStream(3, false);
                return true;
            }
        });
    } catch (Http2Exception ignored) {
        client.close(promise).addListener(new FutureListener<Void>() {
            @Override
            public void operationComplete(Future<Void> future) throws Exception {
                assertTrue(promise.isDone());
                latch.countDown();
            }
        });
    }
    assertTrue(latch.await(5, TimeUnit.SECONDS));
}
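The catch block above shows the core Netty callback pattern: a FutureListener attached to the Future returned by close(). Below is a minimal standalone illustration of that pattern, using the unshaded io.netty.util.concurrent package as in the Netty test above.

import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.EventExecutorGroup;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;

public final class FutureListenerExample {

    public static void main(String[] args) throws InterruptedException {
        EventExecutorGroup group = new DefaultEventExecutorGroup(1);
        try {
            Promise<Void> promise = group.next().newPromise();
            // The listener runs on the executor once the promise completes; it also
            // fires immediately if the promise is already done when it is added.
            promise.addListener((FutureListener<Void>) future ->
                    System.out.println("completed, success=" + future.isSuccess()));
            promise.setSuccess(null);
            Thread.sleep(100L); // give the listener a moment to execute
        } finally {
            group.shutdownGracefully();
        }
    }
}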