use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project redisson by redisson.
the class ReplicatedConnectionManager method scheduleMasterChangeCheck.
private void scheduleMasterChangeCheck(final ReplicatedServersConfig cfg) {
    monitorFuture = GlobalEventExecutor.INSTANCE.schedule(new Runnable() {
        @Override
        public void run() {
            final URL master = currentMaster.get();
            log.debug("Current master: {}", master);
            // Countdown over all configured nodes; the last listener to finish re-arms the check.
            final AtomicInteger count = new AtomicInteger(cfg.getNodeAddresses().size());
            for (final URL addr : cfg.getNodeAddresses()) {
                if (isShuttingDown()) {
                    return;
                }
                RFuture<RedisConnection> connectionFuture = connect(cfg, addr);
                connectionFuture.addListener(new FutureListener<RedisConnection>() {
                    @Override
                    public void operationComplete(Future<RedisConnection> future) throws Exception {
                        if (!future.isSuccess()) {
                            log.error(future.cause().getMessage(), future.cause());
                            if (count.decrementAndGet() == 0) {
                                scheduleMasterChangeCheck(cfg);
                            }
                            return;
                        }
                        if (isShuttingDown()) {
                            return;
                        }
                        RedisConnection connection = future.getNow();
                        // Ask each node for its replication role.
                        RFuture<Map<String, String>> result = connection.async(RedisCommands.INFO_REPLICATION);
                        result.addListener(new FutureListener<Map<String, String>>() {
                            @Override
                            public void operationComplete(Future<Map<String, String>> future) throws Exception {
                                if (!future.isSuccess()) {
                                    log.error(future.cause().getMessage(), future.cause());
                                    if (count.decrementAndGet() == 0) {
                                        scheduleMasterChangeCheck(cfg);
                                    }
                                    return;
                                }
                                Role role = Role.valueOf(future.getNow().get(ROLE_KEY));
                                if (Role.master.equals(role)) {
                                    if (master.equals(addr)) {
                                        log.debug("Current master {} unchanged", master);
                                    } else if (currentMaster.compareAndSet(master, addr)) {
                                        log.info("Master has changed from {} to {}", master, addr);
                                        changeMaster(singleSlotRange.getStartSlot(), addr.getHost(), addr.getPort());
                                    }
                                }
                                if (count.decrementAndGet() == 0) {
                                    scheduleMasterChangeCheck(cfg);
                                }
                            }
                        });
                    }
                });
            }
        }
    }, cfg.getScanInterval(), TimeUnit.MILLISECONDS);
}
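The interesting part is the re-arming pattern: the check is scheduled as a one-shot task and only reschedules itself once every node has answered (via the AtomicInteger countdown), so scans never overlap even when some nodes respond slowly. A minimal sketch of the same idea using a plain ScheduledExecutorService (the class and helper names here are illustrative, not from redisson):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class SelfReschedulingCheck {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    public void scheduleCheck(long intervalMillis, int nodeCount) {
        scheduler.schedule(() -> {
            AtomicInteger remaining = new AtomicInteger(nodeCount);
            for (int i = 0; i < nodeCount; i++) {
                checkNodeAsync(i, () -> {
                    // Re-arm only after the last node responds, so scans never overlap.
                    if (remaining.decrementAndGet() == 0) {
                        scheduleCheck(intervalMillis, nodeCount);
                    }
                });
            }
        }, intervalMillis, TimeUnit.MILLISECONDS);
    }

    // Hypothetical async probe; calls onDone from its completion listener.
    private void checkNodeAsync(int node, Runnable onDone) {
        // ... issue the real check (e.g. INFO REPLICATION) asynchronously ...
        onDone.run();
    }
}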
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project riposte by Nike-Inc.
the class StreamingAsyncHttpClient method streamDownstreamCall.
/**
* TODO: Fully document me.
* <br/>
* NOTE: The returned CompletableFuture will only be completed successfully if the connection to the downstream
* server was successful and the initialRequestChunk was successfully written out. This has implications for
* initialRequestChunk regarding releasing its reference count (i.e. calling {@link
* io.netty.util.ReferenceCountUtil#release(Object)} and passing in initialRequestChunk). If the returned
* CompletableFuture is successful it means initialRequestChunk's reference count will already be reduced by one
* relative to when this method was called because it will have been passed to a successful {@link
* ChannelHandlerContext#writeAndFlush(Object)} method call.
* <p/>
* Long story short - assume initialRequestChunk is an object with a reference count of x:
* <ul>
* <li>
* If the returned CompletableFuture is successful, then when it completes successfully
* initialRequestChunk's reference count will be x - 1
* </li>
* <li>
* If the returned CompletableFuture is *NOT* successful, then when it completes initialRequestChunk's
* reference count will still be x
* </li>
* </ul>
*/
public CompletableFuture<StreamingChannel> streamDownstreamCall(
        String downstreamHost, int downstreamPort, HttpRequest initialRequestChunk, boolean isSecureHttpsCall,
        boolean relaxedHttpsValidation, StreamingCallback callback, long downstreamCallTimeoutMillis,
        boolean performSubSpanAroundDownstreamCalls, boolean addTracingHeadersToDownstreamCall,
        ChannelHandlerContext ctx) {
    CompletableFuture<StreamingChannel> streamingChannel = new CompletableFuture<>();
    // Set the Host header; include the port in the value when it is a non-default port.
    boolean isDefaultPort = (downstreamPort == 80 && !isSecureHttpsCall) || (downstreamPort == 443 && isSecureHttpsCall);
    String hostHeaderValue = (isDefaultPort) ? downstreamHost : downstreamHost + ":" + downstreamPort;
    initialRequestChunk.headers().set(HttpHeaders.Names.HOST, hostHeaderValue);
    long beforeConnectionStartTimeNanos = System.nanoTime();
    // Create a connection to the downstream server.
    ChannelPool pool = getPooledChannelFuture(downstreamHost, downstreamPort);
    Future<Channel> channelFuture = pool.acquire();
    // Add a listener that kicks off the downstream call once the connection is completed.
    channelFuture.addListener(future -> {
        Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
        try {
            long connectionSetupTimeNanos = System.nanoTime() - beforeConnectionStartTimeNanos;
            HttpProcessingState httpProcessingState = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
            if (httpProcessingState != null) {
                RequestInfo<?> requestInfo = httpProcessingState.getRequestInfo();
                if (requestInfo != null) {
                    requestInfo.addRequestAttribute(DOWNSTREAM_CALL_CONNECTION_SETUP_TIME_NANOS_REQUEST_ATTR_KEY, connectionSetupTimeNanos);
                }
            }
            // Set up tracing and MDC so our log messages have the correct distributed trace info, etc.
            originalThreadInfo = linkTracingAndMdcToCurrentThread(ctx);
            if (logger.isDebugEnabled()) {
                logger.debug("CONNECTION SETUP TIME NANOS: {}", connectionSetupTimeNanos);
            }
            if (!future.isSuccess()) {
                try {
                    // We did not connect to the downstream host successfully. Notify the callback.
                    streamingChannel.completeExceptionally(new WrapperException("Unable to connect to downstream host: " + downstreamHost, future.cause()));
                } finally {
                    Channel ch = channelFuture.getNow();
                    if (ch != null) {
                        // We likely will never reach here since the channel future was not successful, however if
                        // we *do* manage to get here somehow, then mark the channel broken and release it back
                        // to the pool.
                        markChannelAsBroken(ch);
                        pool.release(ch);
                    }
                }
                return;
            }
            // noinspection ConstantConditions
            if (performSubSpanAroundDownstreamCalls) {
                // Add the subspan.
                String spanName = getSubspanSpanName(initialRequestChunk.getMethod().name(), downstreamHost + ":" + downstreamPort + initialRequestChunk.getUri());
                if (Tracer.getInstance().getCurrentSpan() == null) {
                    // There is no parent span to start a subspan from, so we have to start a new span for this call
                    // rather than a subspan.
                    // TODO: Set this to CLIENT once we have that ability in the wingtips API for request root spans
                    Tracer.getInstance().startRequestWithRootSpan(spanName);
                } else {
                    // There was at least one span on the stack, so we can start a subspan for this call.
                    Tracer.getInstance().startSubSpan(spanName, Span.SpanPurpose.CLIENT);
                }
            }
            Deque<Span> distributedSpanStackToUse = Tracer.getInstance().getCurrentSpanStackCopy();
            Map<String, String> mdcContextToUse = MDC.getCopyOfContextMap();
            Span spanForDownstreamCall = (distributedSpanStackToUse == null) ? null : distributedSpanStackToUse.peek();
            // Add distributed trace headers to the downstream call if desired and we have a current span.
            if (addTracingHeadersToDownstreamCall && spanForDownstreamCall != null) {
                HttpRequestTracingUtils.propagateTracingHeaders((headerKey, headerValue) -> {
                    if (headerValue != null) {
                        initialRequestChunk.headers().set(headerKey, headerValue);
                    }
                }, spanForDownstreamCall);
            }
            Channel ch = channelFuture.getNow();
            if (logger.isDebugEnabled())
                logger.debug("Channel ID of the Channel pulled from the pool: {}", ch.toString());
            // We may not be in the right thread to modify the channel pipeline and write data. If we're in the
            // wrong thread we can get deadlock type situations. By running the relevant bits in the channel's
            // event loop we're guaranteed it will be run in the correct thread.
            ch.eventLoop().execute(runnableWithTracingAndMdc(() -> {
                BiConsumer<String, Throwable> prepChannelErrorHandler = (errorMessage, cause) -> {
                    try {
                        streamingChannel.completeExceptionally(new WrapperException(errorMessage, cause));
                    } finally {
                        // This channel may be permanently busted depending on the error, so mark it broken and let
                        // the pool close it and clean it up.
                        markChannelAsBroken(ch);
                        pool.release(ch);
                    }
                };
                try {
                    ObjectHolder<Boolean> callActiveHolder = new ObjectHolder<>();
                    callActiveHolder.heldObject = true;
                    ObjectHolder<Boolean> lastChunkSentDownstreamHolder = new ObjectHolder<>();
                    lastChunkSentDownstreamHolder.heldObject = false;
                    // noinspection ConstantConditions
                    prepChannelForDownstreamCall(pool, ch, callback, distributedSpanStackToUse, mdcContextToUse, isSecureHttpsCall, relaxedHttpsValidation, performSubSpanAroundDownstreamCalls, downstreamCallTimeoutMillis, callActiveHolder, lastChunkSentDownstreamHolder);
                    logInitialRequestChunk(initialRequestChunk, downstreamHost, downstreamPort);
                    // Send the HTTP request.
                    ChannelFuture writeFuture = ch.writeAndFlush(initialRequestChunk);
                    // After the initial chunk has been sent we'll open the floodgates
                    // for any further chunk streaming.
                    writeFuture.addListener(completedWriteFuture -> {
                        if (completedWriteFuture.isSuccess())
                            streamingChannel.complete(new StreamingChannel(ch, pool, callActiveHolder, lastChunkSentDownstreamHolder, distributedSpanStackToUse, mdcContextToUse));
                        else {
                            prepChannelErrorHandler.accept("Writing the first HttpRequest chunk to the downstream service failed.", completedWriteFuture.cause());
                            // noinspection UnnecessaryReturnStatement
                            return;
                        }
                    });
                } catch (SSLException | NoSuchAlgorithmException | KeyStoreException ex) {
                    prepChannelErrorHandler.accept("Error setting up SSL context for downstream call", ex);
                    // noinspection UnnecessaryReturnStatement
                    return;
                } catch (Throwable t) {
                    // If we don't catch and handle this here it gets swallowed since we're in a Runnable.
                    prepChannelErrorHandler.accept("An unexpected error occurred while prepping the channel pipeline for the downstream call", t);
                    // noinspection UnnecessaryReturnStatement
                    return;
                }
            }, ctx));
        } catch (Throwable ex) {
            try {
                String errorMsg = "Error occurred attempting to send first chunk (headers/etc) downstream";
                Exception errorToFire = new WrapperException(errorMsg, ex);
                logger.warn(errorMsg, errorToFire);
                streamingChannel.completeExceptionally(errorToFire);
            } finally {
                Channel ch = channelFuture.getNow();
                if (ch != null) {
                    // Depending on where the error was thrown the channel may or may not exist. If it does exist,
                    // then assume it's unusable, mark it as broken, and let the pool close it and remove it.
                    markChannelAsBroken(ch);
                    pool.release(ch);
                }
            }
        } finally {
            // Unhook the tracing and MDC stuff from this thread now that we're done.
            unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
        }
    });
    return streamingChannel;
}
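Per the reference-counting contract spelled out in the javadoc above, a caller that is responsible for initialRequestChunk must release it itself whenever the returned future completes exceptionally, since the chunk was never handed to a successful writeAndFlush. A hedged caller-side sketch (the surrounding variables are assumed to be in scope; this is not riposte code):

CompletableFuture<StreamingChannel> fut = client.streamDownstreamCall(
        downstreamHost, downstreamPort, initialRequestChunk,
        false /* isSecureHttpsCall */, false /* relaxedHttpsValidation */,
        callback, downstreamCallTimeoutMillis,
        true /* performSubSpanAroundDownstreamCalls */,
        true /* addTracingHeadersToDownstreamCall */, ctx);
fut.whenComplete((sc, error) -> {
    if (error != null) {
        // Connect or first write failed: the chunk's refCnt is unchanged (still x), so release our reference.
        ReferenceCountUtil.release(initialRequestChunk);
    }
    // On success the write already consumed one reference (x - 1); nothing extra to release here.
});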
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project pravega by pravega.
the class ClientConnectionInboundHandler method sendAsync.
@Override
public void sendAsync(List<Append> appends, CompletedCallback callback) {
    recentMessage.set(true);
    Channel ch;
    try {
        ch = getChannel();
    } catch (ConnectionFailedException e) {
        callback.complete(new ConnectionFailedException("Connection to " + connectionName + " is not established."));
        return;
    }
    // Combine the write futures of all appends into a single aggregate promise.
    PromiseCombiner combiner = new PromiseCombiner();
    for (Append append : appends) {
        batchSizeTracker.recordAppend(append.getEventNumber(), append.getData().readableBytes());
        combiner.add(ch.write(append));
    }
    ch.flush();
    ChannelPromise promise = ch.newPromise();
    promise.addListener(new GenericFutureListener<Future<? super Void>>() {
        @Override
        public void operationComplete(Future<? super Void> future) throws Exception {
            // cause is null when every write succeeded.
            Throwable cause = future.cause();
            callback.complete(cause == null ? null : new ConnectionFailedException(cause));
        }
    });
    combiner.finish(promise);
}
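PromiseCombiner aggregates the individual write futures: the promise handed to finish() completes only after every added future has completed, and fails with one of the recorded causes if any write failed, which is why a single listener suffices for the whole batch. Note that the no-argument constructor used above is deprecated in newer Netty 4.1 releases in favor of PromiseCombiner(EventExecutor), which also requires that add() and finish() run on that executor's thread. A minimal standalone sketch under those newer-API assumptions (BatchWriter and writeBatch are illustrative names):

import io.netty.channel.Channel;
import io.netty.channel.ChannelPromise;
import io.netty.util.concurrent.PromiseCombiner;

public class BatchWriter {
    // Batches several writes and reports a single aggregate outcome.
    public void writeBatch(Channel ch, Iterable<Object> msgs) {
        // PromiseCombiner(EventExecutor) requires its methods to be called on that executor's thread.
        ch.eventLoop().execute(() -> {
            PromiseCombiner combiner = new PromiseCombiner(ch.eventLoop());
            for (Object msg : msgs) {
                combiner.add(ch.write(msg));
            }
            ch.flush();
            ChannelPromise aggregate = ch.newPromise();
            aggregate.addListener(f -> System.out.println(
                    f.isSuccess() ? "all writes flushed" : "a write failed: " + f.cause()));
            combiner.finish(aggregate);
        });
    }
}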
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project openzaly by akaxincom.
the class HttpServer method start.
public void start(String address, int port) throws Exception {
    try {
        ChannelFuture channelFuture = bootstrap.bind(address, port).sync();
        channelFuture.channel().closeFuture().addListener(new GenericFutureListener<Future<? super Void>>() {
            @Override
            public void operationComplete(Future<? super Void> future) throws Exception {
                closeGracefylly();
            }
        });
    } catch (Exception e) {
        closeGracefylly();
        throw new Exception("start http server error", e);
    }
}
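The listener keeps start() non-blocking: the method returns once the bind succeeds, and closeGracefylly() only runs when the server channel eventually closes. The common blocking alternative (a sketch, not openzaly code) waits on the close future directly:

ChannelFuture channelFuture = bootstrap.bind(address, port).sync();
try {
    // Block the calling thread until the server channel is closed.
    channelFuture.channel().closeFuture().sync();
} finally {
    closeGracefylly();
}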
use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project drill by apache.
the class WebSessionResourcesTest method testCloseWithListener.
/**
* Validates a successful {@link WebSessionResources#close()} with a valid close future, verifying that the
* {@link TestClosedListener} registered on that future is invoked.
* @throws Exception
*/
@Test
public void testCloseWithListener() throws Exception {
    try {
        // Assign latch, executor and closeListener for this test case.
        GenericFutureListener<Future<Void>> closeListener = new TestClosedListener();
        latch = new CountDownLatch(1);
        executor = TransportCheck.createEventLoopGroup(1, "Test-Thread").next();
        Promise<Void> closeFuture = new DefaultPromise(executor);
        // Create WebSessionResources with the above promise to notify the listener.
        webSessionResources = new WebSessionResources(mock(BufferAllocator.class), mock(SocketAddress.class), mock(UserSession.class), closeFuture);
        // Add the test listener to the close future.
        assertTrue(!listenerComplete);
        closeFuture.addListener(closeListener);
        // Close the WebSessionResources.
        webSessionResources.close();
        // Verify the states.
        verify(webSessionResources.getAllocator()).close();
        verify(webSessionResources.getSession()).close();
        assertTrue(webSessionResources.getCloseFuture() == null);
        // Since the listener will be invoked, the test should not wait forever.
        latch.await();
        assertTrue(listenerComplete);
    } catch (Exception e) {
        fail();
    } finally {
        listenerComplete = false;
        executor.shutdownGracefully();
    }
}
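TestClosedListener itself is not shown in this excerpt; presumably it flips the listenerComplete flag and counts down the latch so that latch.await() returns. A plausible sketch matching the field names used above:

private class TestClosedListener implements GenericFutureListener<Future<Void>> {
    @Override
    public void operationComplete(Future<Void> future) throws Exception {
        // Record that the close future notified us, then release the waiting test thread.
        listenerComplete = true;
        latch.countDown();
    }
}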