Use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project bookkeeper by apache.
The class BookieRequestProcessor, method processStartTLSRequestV3:
private void processStartTLSRequestV3(final BookkeeperProtocol.Request r, final Channel c) {
    BookkeeperProtocol.Response.Builder response = BookkeeperProtocol.Response.newBuilder();
    BookkeeperProtocol.BKPacketHeader.Builder header = BookkeeperProtocol.BKPacketHeader.newBuilder();
    header.setVersion(BookkeeperProtocol.ProtocolVersion.VERSION_THREE);
    header.setOperation(r.getHeader().getOperation());
    header.setTxnId(r.getHeader().getTxnId());
    response.setHeader(header.build());
    if (shFactory == null) {
        LOG.error("Got StartTLS request but TLS not configured");
        response.setStatus(BookkeeperProtocol.StatusCode.EBADREQ);
        c.writeAndFlush(response.build());
    } else {
        // There is no need to execute in a different thread, as this operation is light.
        SslHandler sslHandler = shFactory.newTLSHandler();
        c.pipeline().addFirst("tls", sslHandler);
        response.setStatus(BookkeeperProtocol.StatusCode.EOK);
        BookkeeperProtocol.StartTLSResponse.Builder builder = BookkeeperProtocol.StartTLSResponse.newBuilder();
        response.setStartTLSResponse(builder.build());
        sslHandler.handshakeFuture().addListener(new GenericFutureListener<Future<Channel>>() {
            @Override
            public void operationComplete(Future<Channel> future) throws Exception {
                // Notify the AuthPlugin of the completion of the handshake, even in case of failure.
                AuthHandler.ServerSideHandler authHandler = c.pipeline().get(AuthHandler.ServerSideHandler.class);
                authHandler.authProvider.onProtocolUpgrade();
                /*
                 * Success of the future doesn't guarantee success in authentication:
                 * future.isSuccess() only checks that the result field is not null.
                 */
                if (future.isSuccess() && authHandler.isAuthenticated()) {
                    LOG.info("Session is protected by: {}", sslHandler.engine().getSession().getCipherSuite());
                } else {
                    if (future.isSuccess()) {
                        LOG.error("TLS Handshake failed: Could not authenticate.");
                    } else {
                        LOG.error("TLS Handshake failure: ", future.cause());
                    }
                    BookkeeperProtocol.Response.Builder errResponse = BookkeeperProtocol.Response.newBuilder()
                            .setHeader(r.getHeader())
                            .setStatus(BookkeeperProtocol.StatusCode.EIO);
                    c.writeAndFlush(errResponse.build());
                    if (statsEnabled) {
                        bkStats.getOpStats(BKStats.STATS_UNKNOWN).incrementFailedOps();
                    }
                }
            }
        });
        c.writeAndFlush(response.build());
    }
}
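The pattern to note here is the GenericFutureListener attached to the SslHandler handshake future: the listener fires on success and failure alike, and the code distinguishes the two via future.isSuccess(). Below is a minimal, self-contained sketch of that listener idiom, using plain io.netty packages (the shaded coordinates in the heading map onto the same API); the DefaultPromise named handshakeDone is a hypothetical stand-in for sslHandler.handshakeFuture(), not BookKeeper code.

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.DefaultPromise;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.Promise;

public class HandshakeListenerSketch {
    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        try {
            // Stand-in for sslHandler.handshakeFuture().
            Promise<String> handshakeDone = new DefaultPromise<>(group.next());
            handshakeDone.addListener(new GenericFutureListener<Future<String>>() {
                @Override
                public void operationComplete(Future<String> future) {
                    // As in processStartTLSRequestV3, this runs on success and failure alike.
                    if (future.isSuccess()) {
                        System.out.println("Handshake OK: " + future.getNow());
                    } else {
                        System.err.println("Handshake failed: " + future.cause());
                    }
                }
            });
            handshakeDone.setSuccess("TLS_AES_128_GCM_SHA256");
            handshakeDone.await();
        } finally {
            group.shutdownGracefully();
        }
    }
}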
Use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project bgpcep by opendaylight.
The class BgpDeployerTest, method setUp:
@Override
@Before
public void setUp() throws Exception {
    super.setUp();
    doReturn("mapping").when(tableTypeRegistry).toString();
    doReturn(TABLE_TYPE).when(tableTypeRegistry).getTableType(any());
    doReturn(TABLES_KEY).when(tableTypeRegistry).getTableKey(any());
    final ClusterSingletonServiceRegistration serviceRegistration =
            mock(ClusterSingletonServiceRegistration.class);
    doReturn(serviceRegistration).when(singletonServiceProvider).registerClusterSingletonService(any());
    doNothing().when(serviceRegistration).close();
    final Future future = mock(BGPReconnectPromise.class);
    doReturn(true).when(future).cancel(true);
    doReturn(future).when(dispatcher).createReconnectingClient(any(), any(), anyInt(), any());
    deployer = spy(new DefaultBgpDeployer(NETWORK_INSTANCE_NAME, singletonServiceProvider, rpcRegistry,
            extensionContext, dispatcher,
            new DefaultBGPRibRoutingPolicyFactory(getDataBroker(), new StatementRegistry()),
            codecsRegistry, getDomBroker(), getDataBroker(), tableTypeRegistry, stateProviderRegistry));
    bgpSingletonObtainedLatch = new CountDownLatch(1);
    doAnswer(invocationOnMock -> {
        final BGPClusterSingletonService real = (BGPClusterSingletonService) invocationOnMock.callRealMethod();
        if (spiedBgpSingletonService == null) {
            spiedBgpSingletonService = spy(real);
        }
        bgpSingletonObtainedLatch.countDown();
        return spiedBgpSingletonService;
    }).when(deployer).getBgpClusterSingleton(any());
}
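Here the Netty Future is not used directly; it is a Mockito mock stubbed so the deployer sees a cancellable reconnect attempt. A minimal sketch of that stubbing idiom follows, assuming Mockito on the classpath; the FutureMockSketch class is illustrative and not part of bgpcep.

import io.netty.util.concurrent.Future;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

public class FutureMockSketch {
    public static void main(String[] args) {
        @SuppressWarnings("unchecked")
        Future<Void> future = mock(Future.class);
        // Like the BGPReconnectPromise mock in setUp(): cancel(...) reports success.
        doReturn(true).when(future).cancel(anyBoolean());
        System.out.println("cancelled: " + future.cancel(true));
    }
}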
Use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project cdap by caskdata.
The class ServiceSocksServerConnectHandler, method createForwardingChannelHandler:
@Override
protected Future<RelayChannelHandler> createForwardingChannelHandler(Channel inboundChannel,
        String destAddress, int destPort) {
    Promise<RelayChannelHandler> promise = new DefaultPromise<>(inboundChannel.eventLoop());
    // Creates a bootstrap for connecting to the target service.
    ChannelGroup channels = new DefaultChannelGroup(inboundChannel.eventLoop());
    Bootstrap bootstrap = new Bootstrap()
            .group(inboundChannel.eventLoop())
            .channel(NioSocketChannel.class)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .handler(new ChannelInboundHandlerAdapter() {
                @Override
                public void channelActive(ChannelHandlerContext ctx) {
                    channels.add(ctx.channel());
                    // When the outbound connection is active, add the relay channel handler to the
                    // current pipeline, which relays traffic coming back from the outbound connection.
                    // Also complete the relay channel handler future, which relays traffic from
                    // inbound to outbound.
                    ctx.pipeline().addLast(new SimpleRelayChannelHandler(inboundChannel));
                    promise.setSuccess(new SimpleRelayChannelHandler(ctx.channel()));
                }
            });
    // Discover the target address.
    Promise<Discoverable> discoverablePromise = new DefaultPromise<>(inboundChannel.eventLoop());
    Cancellable cancellable = discoveryServiceClient.discover(destAddress).watchChanges(serviceDiscovered -> {
        // Once an endpoint is discovered, complete the discoverable promise.
        Discoverable discoverable = new RandomEndpointStrategy(() -> serviceDiscovered).pick();
        if (discoverable != null) {
            discoverablePromise.setSuccess(discoverable);
        }
    }, inboundChannel.eventLoop());
    // When discovery completes successfully, connect to the destination.
    discoverablePromise.addListener((GenericFutureListener<Future<Discoverable>>) discoverableFuture -> {
        cancellable.cancel();
        if (discoverableFuture.isSuccess()) {
            Discoverable discoverable = discoverableFuture.get();
            bootstrap.connect(discoverable.getSocketAddress()).addListener((ChannelFutureListener) channelFuture -> {
                if (!channelFuture.isSuccess()) {
                    promise.setFailure(channelFuture.cause());
                }
            });
        } else {
            promise.setFailure(discoverableFuture.cause());
        }
    });
    // On inbound channel close, close all outbound channels.
    // Also cancel the watch, since it is no longer needed.
    // This handles the case where discovery never returns an endpoint before the
    // client connection times out.
    inboundChannel.closeFuture().addListener((ChannelFutureListener) future -> {
        cancellable.cancel();
        channels.close();
    });
    return promise;
}
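The method bridges a callback-style discovery API into a Netty Promise, then chains the connect step off it with addListener. A minimal sketch of that promise-chaining pattern is below; lookupAddress is a hypothetical stand-in for discoveryServiceClient.discover(...).watchChanges(...), and the connect step is reduced to a print.

import java.net.InetSocketAddress;
import java.util.function.Consumer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.DefaultPromise;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.Promise;

public class PromiseChainSketch {
    // Hypothetical asynchronous lookup that reports its result through a callback.
    static void lookupAddress(String name, Consumer<InetSocketAddress> callback) {
        callback.accept(new InetSocketAddress("127.0.0.1", 8080));
    }

    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        try {
            Promise<InetSocketAddress> discovered = new DefaultPromise<>(group.next());
            // Bridge the callback into the promise, as the watchChanges lambda does above.
            lookupAddress("my-service", discovered::setSuccess);
            // Chain the next step off the promise.
            discovered.addListener((GenericFutureListener<Future<InetSocketAddress>>) f -> {
                if (f.isSuccess()) {
                    System.out.println("Would connect to " + f.getNow());
                } else {
                    System.err.println("Discovery failed: " + f.cause());
                }
            });
            discovered.await();
        } finally {
            group.shutdownGracefully();
        }
    }
}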
Use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project openflowplugin by opendaylight.
The class OFDatagramPacketEncoder, method encode:
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
protected void encode(final ChannelHandlerContext ctx, final UdpMessageListenerWrapper wrapper,
        final List<Object> out) throws Exception {
    LOG.trace("Encoding");
    try {
        ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer();
        serializationFactory.messageToBuffer(wrapper.getMsg().getVersion(), buffer, wrapper.getMsg());
        out.add(new DatagramPacket(buffer, wrapper.getAddress()));
    } catch (RuntimeException e) {
        LOG.warn("Message serialization failed: {}", e.getMessage());
        Future<Void> newFailedFuture = ctx.newFailedFuture(e);
        wrapper.getListener().operationComplete(newFailedFuture);
    }
}
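On serialization failure the encoder does not rethrow; it notifies the caller's listener with a pre-failed future from ctx.newFailedFuture(e). A minimal sketch of the same idiom is below; GlobalEventExecutor's newFailedFuture stands in for the channel context, and the listener is a plain lambda rather than openflowplugin's wrapper listener.

import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.GlobalEventExecutor;

public class FailedFutureSketch {
    public static void main(String[] args) throws Exception {
        GenericFutureListener<Future<Void>> listener =
                f -> System.err.println("Serialization failed: " + f.cause().getMessage());
        // Equivalent in spirit to wrapper.getListener().operationComplete(ctx.newFailedFuture(e)).
        Future<Void> failed = GlobalEventExecutor.INSTANCE.newFailedFuture(
                new IllegalStateException("cannot serialize message"));
        listener.operationComplete(failed);
    }
}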
Use of org.apache.flink.shaded.netty4.io.netty.util.concurrent.Future in project openflowplugin by opendaylight.
The class ChannelOutboundQueue, method flush:
/*
 * The synchronized keyword should be unnecessary, really, but it enforces
 * queue order should something go terribly wrong. It should be completely
 * uncontended.
 */
private synchronized void flush() {
    final long start = System.nanoTime();
    final long deadline = start + maxWorkTime;
    LOG.debug("Dequeuing messages to channel {}", channel);
    long messages = 0;
    for (;; ++messages) {
        if (!channel.isWritable()) {
            LOG.trace("Channel is no longer writable");
            break;
        }
        final MessageHolder<?> h = queue.poll();
        if (h == null) {
            LOG.trace("The queue is completely drained");
            break;
        }
        final GenericFutureListener<Future<Void>> l = h.takeListener();
        final ChannelFuture p;
        if (address == null) {
            p = channel.write(new MessageListenerWrapper(h.takeMessage(), l));
        } else {
            p = channel.write(new UdpMessageListenerWrapper(h.takeMessage(), l, address));
        }
        if (l != null) {
            p.addListener(l);
        }
        /*
         * Check every WORKTIME_RECHECK_MSGS messages whether the allotted time
         * has been exceeded.
         *
         * XXX: given we already measure our flushing throughput, we
         * should be able to perform dynamic adjustments here.
         * Is that additional complexity needed, though?
         */
        if (messages % WORKTIME_RECHECK_MSGS == 0 && System.nanoTime() >= deadline) {
            LOG.trace("Exceeded allotted work time {}us", TimeUnit.NANOSECONDS.toMicros(maxWorkTime));
            break;
        }
    }
    if (messages > 0) {
        LOG.debug("Flushing {} message(s) to channel {}", messages, channel);
        channel.flush();
    }
    if (LOG.isDebugEnabled()) {
        final long stop = System.nanoTime();
        LOG.debug("Flushed {} messages in {}us to channel {}", messages,
                TimeUnit.NANOSECONDS.toMicros(stop - start), channel);
    }
    /*
     * We are almost ready to terminate. This is a bit tricky, because
     * we do not want to have a race window where a message would be
     * stuck on the queue without a flush being scheduled.
     *
     * So we mark ourselves as not running and then re-check whether a
     * flush needs to be scheduled. That re-synchronizes with other threads
     * such that only one flush is scheduled at any given time.
     */
    if (!FLUSH_SCHEDULED_UPDATER.compareAndSet(this, 1, 0)) {
        LOG.warn("Channel {} queue {} flusher found unscheduled", channel, queue);
    }
    conditionalFlush();
}
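The termination dance at the end relies on FLUSH_SCHEDULED_UPDATER, evidently an AtomicIntegerFieldUpdater over a volatile int flag: the flusher CASes the flag from 1 back to 0 and then re-checks the queue, so a message enqueued during that window still gets a flush scheduled. A minimal, self-contained sketch of that guard follows; the class, queue, and synchronous flush call are illustrative simplifications (the real code schedules the flush on the channel's event loop), not the openflowplugin code.

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

public class FlushGuardSketch {
    private static final AtomicIntegerFieldUpdater<FlushGuardSketch> FLUSH_SCHEDULED_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(FlushGuardSketch.class, "flushScheduled");

    private final ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
    private volatile int flushScheduled = 0;

    void enqueue(String msg) {
        queue.add(msg);
        conditionalFlush();
    }

    private void conditionalFlush() {
        // Only the thread that flips the flag 0 -> 1 schedules a flush; everyone
        // else knows one is already pending.
        if (!queue.isEmpty() && FLUSH_SCHEDULED_UPDATER.compareAndSet(this, 0, 1)) {
            flush();
        }
    }

    private void flush() {
        String msg;
        while ((msg = queue.poll()) != null) {
            System.out.println("flushing " + msg);
        }
        // Mark ourselves as not running, then re-check: a message enqueued after
        // the drain but before this CAS still gets a flush scheduled.
        if (!FLUSH_SCHEDULED_UPDATER.compareAndSet(this, 1, 0)) {
            System.err.println("flusher found unscheduled");
        }
        conditionalFlush();
    }

    public static void main(String[] args) {
        FlushGuardSketch g = new FlushGuardSketch();
        g.enqueue("hello");
        g.enqueue("world");
    }
}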