Use of org.xnio.XnioWorker in project baseio by generallycloud.
The class NioTcpChannelTestCase, method main.
public static void main(String[] args) throws Exception {
    log.info("Test: acceptor");
    final CountDownLatch ioLatch = new CountDownLatch(4);
    final CountDownLatch closeLatch = new CountDownLatch(2);
    final AtomicBoolean clientOpened = new AtomicBoolean();
    final AtomicBoolean clientReadOnceOK = new AtomicBoolean();
    final AtomicBoolean clientReadDoneOK = new AtomicBoolean();
    final AtomicBoolean clientReadTooMuch = new AtomicBoolean();
    final AtomicBoolean clientWriteOK = new AtomicBoolean();
    final AtomicBoolean serverOpened = new AtomicBoolean();
    final AtomicBoolean serverReadOnceOK = new AtomicBoolean();
    final AtomicBoolean serverReadDoneOK = new AtomicBoolean();
    final AtomicBoolean serverReadTooMuch = new AtomicBoolean();
    final AtomicBoolean serverWriteOK = new AtomicBoolean();
    final byte[] bytes = "Ummagumma!".getBytes("UTF-8");
    final Xnio xnio = Xnio.getInstance("nio");
    final XnioWorker worker = xnio.createWorker(OptionMap.create(Options.WORKER_WRITE_THREADS, 2, Options.WORKER_READ_THREADS, 2));
    try {
        final FutureResult<InetSocketAddress> futureAddressResult = new FutureResult<InetSocketAddress>();
        final IoFuture<InetSocketAddress> futureAddress = futureAddressResult.getIoFuture();
        worker.acceptStream(new InetSocketAddress(Inet4Address.getByAddress(new byte[] { 127, 0, 0, 1 }), 0), new ChannelListener<ConnectedStreamChannel>() {
            private final ByteBuffer inboundBuf = ByteBuffer.allocate(512);
            private int readCnt = 0;
            private final ByteBuffer outboundBuf = ByteBuffer.wrap(bytes);

            public void handleEvent(final ConnectedStreamChannel channel) {
                channel.getCloseSetter().set(new ChannelListener<ConnectedStreamChannel>() {
                    public void handleEvent(final ConnectedStreamChannel channel) {
                        closeLatch.countDown();
                    }
                });
                channel.getReadSetter().set(new ChannelListener<ConnectedStreamChannel>() {
                    public void handleEvent(final ConnectedStreamChannel channel) {
                        try {
                            final int res = channel.read(inboundBuf);
                            if (res == -1) {
                                serverReadDoneOK.set(true);
                                ioLatch.countDown();
                                channel.shutdownReads();
                            } else if (res > 0) {
                                final int ttl = readCnt += res;
                                if (ttl == bytes.length) {
                                    serverReadOnceOK.set(true);
                                } else if (ttl > bytes.length) {
                                    serverReadTooMuch.set(true);
                                    IoUtils.safeClose(channel);
                                    return;
                                }
                            }
                        } catch (IOException e) {
                            log.errorf(e, "Server read failed");
                            IoUtils.safeClose(channel);
                        }
                    }
                });
                channel.getWriteSetter().set(new ChannelListener<ConnectedStreamChannel>() {
                    public void handleEvent(final ConnectedStreamChannel channel) {
                        try {
                            channel.write(outboundBuf);
                            if (!outboundBuf.hasRemaining()) {
                                serverWriteOK.set(true);
                                Channels.shutdownWritesBlocking(channel);
                                ioLatch.countDown();
                            }
                        } catch (IOException e) {
                            log.errorf(e, "Server write failed");
                            IoUtils.safeClose(channel);
                        }
                    }
                });
                channel.resumeReads();
                channel.resumeWrites();
                serverOpened.set(true);
            }
        }, new ChannelListener<BoundChannel>() {
            public void handleEvent(final BoundChannel channel) {
                futureAddressResult.setResult(channel.getLocalAddress(InetSocketAddress.class));
            }
        }, OptionMap.create(Options.REUSE_ADDRESSES, Boolean.TRUE));
        final InetSocketAddress localAddress = futureAddress.get();
        worker.connectStream(localAddress, new ChannelListener<ConnectedStreamChannel>() {
            private final ByteBuffer inboundBuf = ByteBuffer.allocate(512);
            private int readCnt = 0;
            private final ByteBuffer outboundBuf = ByteBuffer.wrap(bytes);

            public void handleEvent(final ConnectedStreamChannel channel) {
                channel.getCloseSetter().set(new ChannelListener<ConnectedStreamChannel>() {
                    public void handleEvent(final ConnectedStreamChannel channel) {
                        closeLatch.countDown();
                    }
                });
                channel.getReadSetter().set(new ChannelListener<ConnectedStreamChannel>() {
                    public void handleEvent(final ConnectedStreamChannel channel) {
                        try {
                            final int res = channel.read(inboundBuf);
                            if (res == -1) {
                                channel.shutdownReads();
                                clientReadDoneOK.set(true);
                                ioLatch.countDown();
                            } else if (res > 0) {
                                final int ttl = readCnt += res;
                                if (ttl == bytes.length) {
                                    clientReadOnceOK.set(true);
                                } else if (ttl > bytes.length) {
                                    clientReadTooMuch.set(true);
                                    IoUtils.safeClose(channel);
                                    return;
                                }
                            }
                        } catch (IOException e) {
                            log.errorf(e, "Client read failed");
                            IoUtils.safeClose(channel);
                        }
                    }
                });
                channel.getWriteSetter().set(new ChannelListener<ConnectedStreamChannel>() {
                    public void handleEvent(final ConnectedStreamChannel channel) {
                        try {
                            channel.write(outboundBuf);
                            if (!outboundBuf.hasRemaining()) {
                                clientWriteOK.set(true);
                                Channels.shutdownWritesBlocking(channel);
                                ioLatch.countDown();
                            }
                        } catch (IOException e) {
                            log.errorf(e, "Client write failed");
                            IoUtils.safeClose(channel);
                        }
                    }
                });
                channel.resumeReads();
                channel.resumeWrites();
                clientOpened.set(true);
            }
        }, null, OptionMap.EMPTY);
        // assertTrue("Read timed out", ioLatch.await(500L, TimeUnit.MILLISECONDS));
        // assertTrue("Close timed out", closeLatch.await(500L, TimeUnit.MILLISECONDS));
        // assertFalse("Client read too much", clientReadTooMuch.get());
        // assertTrue("Client read OK", clientReadOnceOK.get());
        // assertTrue("Client read done", clientReadDoneOK.get());
        // assertTrue("Client write OK", clientWriteOK.get());
        // assertFalse("Server read too much", serverReadTooMuch.get());
        // assertTrue("Server read OK", serverReadOnceOK.get());
        // assertTrue("Server read done", serverReadDoneOK.get());
        // assertTrue("Server write OK", serverWriteOK.get());
    } finally {
        worker.shutdown();
    }
}
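Note that the worker above is only shut down in the finally block; the main method can exit before the I/O threads have drained, which is why the assertions are commented out. If the flags need to be settled before checking them, a graceful shutdown with a bounded wait is the usual pattern. A minimal sketch, assuming the XNIO 3.x XnioWorker API (shutdown, awaitTermination and shutdownNow come from its ExecutorService contract):

import java.util.concurrent.TimeUnit;

import org.xnio.OptionMap;
import org.xnio.Xnio;
import org.xnio.XnioWorker;

public class WorkerShutdownSketch {
    public static void main(String[] args) throws Exception {
        // Create a worker from the NIO provider with default options.
        final XnioWorker worker = Xnio.getInstance("nio").createWorker(OptionMap.EMPTY);
        try {
            // ... acceptStream/connectStream work goes here ...
        } finally {
            worker.shutdown(); // stop accepting new work
            if (!worker.awaitTermination(5, TimeUnit.SECONDS)) { // bounded wait for I/O threads
                worker.shutdownNow(); // force-close anything still open
            }
        }
    }
}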
Use of org.xnio.XnioWorker in project undertow by undertow-io.
The class NodePingUtil, method pingHost.
/**
 * Try to open a socket connection to the given address.
 *
 * @param address  the socket address
 * @param exchange the HTTP server exchange
 * @param callback the ping callback
 * @param options  the options
 */
static void pingHost(InetSocketAddress address, HttpServerExchange exchange, PingCallback callback, OptionMap options) {
    final XnioIoThread thread = exchange.getIoThread();
    final XnioWorker worker = thread.getWorker();
    final HostPingTask r = new HostPingTask(address, worker, callback, options);
    // Schedule timeout task
    scheduleCancelTask(exchange.getIoThread(), r, 5, TimeUnit.SECONDS);
    exchange.dispatch(exchange.isInIoThread() ? SameThreadExecutor.INSTANCE : thread, r);
}
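HostPingTask and scheduleCancelTask are private helpers of NodePingUtil and are not shown here. The timeout half of that pair can be expressed directly on the XNIO executor API: XnioIoThread.executeAfter returns a key the task can remove if the ping completes before the deadline. A sketch under that assumption, with a hypothetical CancellableTask interface standing in for Undertow's real task type:

import java.util.concurrent.TimeUnit;

import org.xnio.XnioExecutor;
import org.xnio.XnioIoThread;

final class PingTimeoutSketch {
    // Hypothetical stand-in for the internal task type.
    interface CancellableTask extends Runnable {
        void timeout();
    }

    // Arms a timeout on the I/O thread; callers keep the returned key
    // and call key.remove() if the ping finishes before the deadline.
    static XnioExecutor.Key scheduleCancelTask(XnioIoThread thread, CancellableTask task, long time, TimeUnit unit) {
        return thread.executeAfter(task::timeout, time, unit);
    }
}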
Use of org.xnio.XnioWorker in project undertow by undertow-io.
The class H2CUpgradeResetTestCase, method beforeClass.
/**
 * Initializes the server with the H2C upgrade handler and adds the echo
 * handler to manage the requests.
 *
 * @throws IOException if the server fails to start
 */
@BeforeClass
public static void beforeClass() throws IOException {
    final PathHandler path = new PathHandler().addExactPath(ECHO_PATH, new HttpHandler() {
        @Override
        public void handleRequest(HttpServerExchange exchange) throws Exception {
            sendEchoResponse(exchange);
        }
    });
    server = Undertow.builder()
            .addHttpListener(DefaultServer.getHostPort() + 1, DefaultServer.getHostAddress(), new Http2UpgradeHandler(path))
            .setSocketOption(Options.REUSE_ADDRESSES, true)
            .build();
    server.start();
    // Create xnio worker
    final Xnio xnio = Xnio.getInstance();
    final XnioWorker xnioWorker = xnio.createWorker(null, OptionMap.builder()
            .set(Options.WORKER_IO_THREADS, 8)
            .set(Options.TCP_NODELAY, true)
            .set(Options.KEEP_ALIVE, true)
            .getMap());
    worker = xnioWorker;
}
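The snippet stops at setup; the matching teardown is not shown. A sketch of the usual @AfterClass counterpart (an assumption about the test class, not code from the project):

@AfterClass
public static void afterClass() {
    if (server != null) {
        server.stop(); // stop the Undertow listener opened in beforeClass
    }
    if (worker != null) {
        worker.shutdown(); // release the XNIO I/O and task threads
    }
}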
Use of org.xnio.XnioWorker in project undertow by undertow-io.
The class HttpClientTestCase, method beforeClass.
@BeforeClass
public static void beforeClass() throws IOException {
    // Create xnio worker
    final Xnio xnio = Xnio.getInstance();
    final XnioWorker xnioWorker = xnio.createWorker(null, DEFAULT_OPTIONS);
    worker = xnioWorker;
    DefaultServer.setRootHandler(new PathHandler().addExactPath(MESSAGE, new HttpHandler() {
        @Override
        public void handleRequest(HttpServerExchange exchange) throws Exception {
            sendMessage(exchange);
        }
    }).addExactPath(READTIMEOUT, new HttpHandler() {
        @Override
        public void handleRequest(HttpServerExchange exchange) throws Exception {
            exchange.setStatusCode(StatusCodes.OK);
            exchange.getResponseHeaders().put(Headers.CONTENT_LENGTH, 5 + "");
            StreamSinkChannel responseChannel = exchange.getResponseChannel();
            responseChannel.write(ByteBuffer.wrap(new byte[] { 'a', 'b', 'c' }));
            responseChannel.flush();
            try {
                // READ_TIMEOUT is set to 600ms on the client side; the server
                // intentionally sleeps for 2000ms so that the read timeout
                // fires on the client.
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            responseChannel.write(ByteBuffer.wrap(new byte[] { 'd', 'e' }));
            responseChannel.close();
        }
    }).addExactPath(POST, new HttpHandler() {
        @Override
        public void handleRequest(HttpServerExchange exchange) throws Exception {
            exchange.getRequestReceiver().receiveFullString(new Receiver.FullStringCallback() {
                @Override
                public void handle(HttpServerExchange exchange, String message) {
                    exchange.getResponseSender().send(message);
                }
            });
        }
    }));
}
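DEFAULT_OPTIONS is referenced but not defined in the snippet. A plausible reconstruction, mirroring the worker options used in the H2CUpgradeResetTestCase snippet above (an assumption, not the project's actual constant):

import org.xnio.OptionMap;
import org.xnio.Options;

public class HttpClientTestCaseOptions {
    // Hypothetical reconstruction of the constant referenced above.
    static final OptionMap DEFAULT_OPTIONS = OptionMap.builder()
            .set(Options.WORKER_IO_THREADS, 8)
            .set(Options.TCP_NODELAY, true)
            .set(Options.KEEP_ALIVE, true)
            .getMap();
}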
Use of org.xnio.XnioWorker in project undertow by undertow-io.
The class UndertowContainerProvider, method getDefaultContainer.
static ServerWebSocketContainer getDefaultContainer() {
    if (defaultContainerDisabled) {
        return null;
    }
    if (defaultContainer != null) {
        return defaultContainer;
    }
    synchronized (UndertowContainerProvider.class) {
        if (defaultContainer == null) {
            // this is not great, as we have no way to control the lifecycle
            // but there is not much we can do
            // todo: what options should we use here?
            ByteBufferPool buffers = new DefaultByteBufferPool(directBuffers, 1024, 100, 12);
            defaultContainer = new ServerWebSocketContainer(defaultIntrospector, UndertowContainerProvider.class.getClassLoader(), new Supplier<XnioWorker>() {

                volatile XnioWorker worker;

                @Override
                public XnioWorker get() {
                    if (worker == null) {
                        synchronized (this) {
                            if (worker == null) {
                                try {
                                    worker = Xnio.getInstance().createWorker(OptionMap.create(Options.THREAD_DAEMON, true));
                                } catch (IOException e) {
                                    throw new RuntimeException(e);
                                }
                            }
                        }
                    }
                    return worker;
                }
            }, buffers, Collections.EMPTY_LIST, !invokeInIoThread);
        }
        return defaultContainer;
    }
}
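The Supplier above uses the standard double-checked locking idiom (a volatile field checked outside and inside a synchronized block), so the daemon XnioWorker is created lazily at most once. Application code normally reaches getDefaultContainer through the standard JSR-356 entry point rather than calling it directly; a minimal usage sketch:

import javax.websocket.ContainerProvider;
import javax.websocket.WebSocketContainer;

public class DefaultContainerSketch {
    public static void main(String[] args) {
        // ContainerProvider locates UndertowContainerProvider through the
        // ServiceLoader mechanism; with Undertow on the class path this
        // resolves to the default container built above (unless disabled).
        WebSocketContainer container = ContainerProvider.getWebSocketContainer();
        System.out.println(container.getClass().getName());
    }
}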