Use of java.util.concurrent.ExecutorService in project jetty.project by eclipse.
From the class WebSocketScopeSessionTest, the method testMultiSession_Overlapping:
@Test
public void testMultiSession_Overlapping() throws Exception {
    final CountDownLatch midLatch = new CountDownLatch(2);
    final CountDownLatch end1Latch = new CountDownLatch(1);
    Callable<Session> call1 = new Callable<Session>() {
        @Override
        public Session call() throws Exception {
            Session ret = null;
            ScopedInstance<WebSocketScopeContext> wsScope1Bean = newInstance(WebSocketScopeContext.class);
            WebSocketScopeContext wsScope1 = wsScope1Bean.instance;
            wsScope1.create();
            try {
                // Scope 1
                wsScope1.begin();
                BogusSession sess = new BogusSession("1");
                wsScope1.setSession(sess);
                midLatch.countDown();
                midLatch.await(1, TimeUnit.SECONDS);
                ScopedInstance<BogusSocket> sock1Bean = newInstance(BogusSocket.class);
                BogusSocket sock1 = sock1Bean.instance;
                assertThat("Socket 1 Session", sock1.getSession(), sameInstance((Session) sess));
                ret = sock1.getSession();
                sock1Bean.destroy();
            } finally {
                wsScope1.end();
            }
            wsScope1.destroy();
            wsScope1Bean.destroy();
            end1Latch.countDown();
            return ret;
        }
    };
    final CountDownLatch end2Latch = new CountDownLatch(1);
    Callable<Session> call2 = new Callable<Session>() {
        @Override
        public Session call() throws Exception {
            Session ret = null;
            ScopedInstance<WebSocketScopeContext> wsScope2Bean = newInstance(WebSocketScopeContext.class);
            WebSocketScopeContext wsScope2 = wsScope2Bean.instance;
            wsScope2.create();
            try {
                // Scope 2
                wsScope2.begin();
                BogusSession sess = new BogusSession("2");
                wsScope2.setSession(sess);
                ScopedInstance<BogusSocket> sock2Bean = newInstance(BogusSocket.class);
                midLatch.countDown();
                midLatch.await(1, TimeUnit.SECONDS);
                BogusSocket sock2 = sock2Bean.instance;
                ret = sock2.getSession();
                assertThat("Socket 2 Session", sock2.getSession(), sameInstance((Session) sess));
                sock2Bean.destroy();
            } finally {
                wsScope2.end();
            }
            wsScope2.destroy();
            wsScope2Bean.destroy();
            end2Latch.countDown();
            return ret;
        }
    };
    ExecutorService svc = Executors.newFixedThreadPool(4);
    Future<Session> fut1 = svc.submit(call1);
    Future<Session> fut2 = svc.submit(call2);
    Session sess1 = fut1.get(1, TimeUnit.SECONDS);
    Session sess2 = fut2.get(1, TimeUnit.SECONDS);
    assertThat("Sessions are different", sess1, not(sameInstance(sess2)));
}
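The ExecutorService idiom exercised here is: submit two independent Callables to a fixed pool, force them to overlap with a shared CountDownLatch, and collect each result with a timed Future.get. A minimal, self-contained sketch of that pattern (the class and task names below are illustrative, not part of the Jetty test):

import java.util.concurrent.*;

public class OverlappingTasksSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch bothRunning = new CountDownLatch(2);
        ExecutorService pool = Executors.newFixedThreadPool(2);
        Callable<String> task = () -> {
            bothRunning.countDown();
            // Wait (briefly) until the other task is also running, so the two overlap.
            bothRunning.await(1, TimeUnit.SECONDS);
            return Thread.currentThread().getName();
        };
        Future<String> f1 = pool.submit(task);
        Future<String> f2 = pool.submit(task);
        // A timed get() keeps the caller from hanging if a task deadlocks.
        System.out.println(f1.get(1, TimeUnit.SECONDS));
        System.out.println(f2.get(1, TimeUnit.SECONDS));
        pool.shutdown();
    }
}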
Use of java.util.concurrent.ExecutorService in project jetty.project by eclipse.
From the class SlowClientsTest, the method testSlowClientsWithSmallThreadPool:
@Test(timeout = 10000)
public void testSlowClientsWithSmallThreadPool() throws Exception {
    File keystore = MavenTestingUtils.getTestResourceFile("keystore");
    SslContextFactory sslContextFactory = new SslContextFactory();
    sslContextFactory.setKeyStorePath(keystore.getAbsolutePath());
    sslContextFactory.setKeyStorePassword("storepwd");
    sslContextFactory.setKeyManagerPassword("keypwd");
    int maxThreads = 6;
    int contentLength = 8 * 1024 * 1024;
    QueuedThreadPool serverThreads = new QueuedThreadPool(maxThreads);
    serverThreads.setDetailedDump(true);
    Server server = new Server(serverThreads);
    try {
        ServerConnector connector = new ServerConnector(server, 1, 1, sslContextFactory);
        connector.setPort(8888);
        server.addConnector(connector);
        server.setHandler(new AbstractHandler() {
            @Override
            public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
                baseRequest.setHandled(true);
                logger.info("SERVING {}", target);
                // Write some big content.
                response.getOutputStream().write(new byte[contentLength]);
                logger.info("SERVED {}", target);
            }
        });
        server.start();
        SSLContext sslContext = sslContextFactory.getSslContext();
        CompletableFuture[] futures = new CompletableFuture[2 * maxThreads];
        ExecutorService executor = Executors.newFixedThreadPool(futures.length);
        for (int i = 0; i < futures.length; i++) {
            int k = i;
            futures[i] = CompletableFuture.runAsync(() -> {
                try (SSLSocket socket = (SSLSocket) sslContext.getSocketFactory().createSocket("localhost", connector.getLocalPort())) {
                    socket.setSoTimeout(contentLength / 1024);
                    OutputStream output = socket.getOutputStream();
                    String target = "/" + k;
                    String request = "GET " + target + " HTTP/1.1\r\n" + "Host: localhost\r\n" + "Connection: close\r\n" + "\r\n";
                    output.write(request.getBytes(StandardCharsets.UTF_8));
                    output.flush();
                    while (serverThreads.getIdleThreads() > 0)
                        Thread.sleep(50);
                    InputStream input = socket.getInputStream();
                    while (true) {
                        int read = input.read();
                        if (read < 0)
                            break;
                    }
                    logger.info("FINISHED {}", target);
                } catch (IOException x) {
                    throw new UncheckedIOException(x);
                } catch (InterruptedException x) {
                    throw new UncheckedIOException(new InterruptedIOException());
                }
            }, executor);
        }
        CompletableFuture.allOf(futures).join();
    } finally {
        server.stop();
    }
}
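The ExecutorService usage worth noting here is CompletableFuture.runAsync(Runnable, Executor), which runs each simulated client on the supplied pool instead of the common ForkJoinPool, plus CompletableFuture.allOf(...).join() to wait for all of them. A minimal sketch of that pattern, stripped of the Jetty-specific setup:

import java.util.concurrent.*;

public class RunAsyncOnPoolSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        CompletableFuture<?>[] futures = new CompletableFuture<?>[4];
        for (int i = 0; i < futures.length; i++) {
            int k = i;
            // Each task runs on the explicit executor, not ForkJoinPool.commonPool().
            futures[i] = CompletableFuture.runAsync(
                    () -> System.out.println("task " + k + " on " + Thread.currentThread().getName()),
                    executor);
        }
        CompletableFuture.allOf(futures).join();  // blocks until every task completes
        executor.shutdown();
    }
}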
Use of java.util.concurrent.ExecutorService in project dropwizard by dropwizard.
From the class DBIHealthCheckTest, the method testItTimesOutProperly:
@Test
public void testItTimesOutProperly() throws Exception {
    String validationQuery = "select 1";
    DBI dbi = mock(DBI.class);
    Handle handle = mock(Handle.class);
    when(dbi.open()).thenReturn(handle);
    Mockito.doAnswer(invocation -> {
        try {
            TimeUnit.SECONDS.sleep(10);
        } catch (Exception ignored) {
        }
        return null;
    }).when(handle).execute(validationQuery);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    DBIHealthCheck dbiHealthCheck = new DBIHealthCheck(executorService, Duration.milliseconds(5), dbi, validationQuery);
    HealthCheck.Result result = dbiHealthCheck.check();
    executorService.shutdown();
    assertThat("is unhealthy", false, is(result.isHealthy()));
}
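The DBIHealthCheck internals are not shown in this snippet, but the timeout behaviour under test typically comes from running the validation query on the injected ExecutorService and bounding it with a timed Future.get. A hedged sketch of that general pattern only (not the Dropwizard implementation; names are illustrative):

import java.util.concurrent.*;

public class TimedCheckSketch {
    // Runs 'work' on the given executor and reports unhealthy if it exceeds 'timeoutMillis'.
    static boolean runWithTimeout(ExecutorService executor, Runnable work, long timeoutMillis) {
        Future<?> future = executor.submit(work);
        try {
            future.get(timeoutMillis, TimeUnit.MILLISECONDS);
            return true;                     // completed in time -> healthy
        } catch (TimeoutException e) {
            future.cancel(true);             // give up on the slow call
            return false;                    // timed out -> unhealthy
        } catch (InterruptedException | ExecutionException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        boolean healthy = runWithTimeout(executor, () -> {
            try { TimeUnit.SECONDS.sleep(10); } catch (InterruptedException ignored) { }
        }, 5);
        System.out.println("healthy: " + healthy);  // prints false after roughly 5 ms
        executor.shutdownNow();
    }
}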
Use of java.util.concurrent.ExecutorService in project elasticsearch by elastic.
From the class FixedExecutorBuilder, the method build:
@Override
ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) {
    int size = settings.size;
    int queueSize = settings.queueSize;
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor = EsExecutors.newFixed(name(), size, queueSize, threadFactory, threadContext);
    final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED, size, size, null, queueSize < 0 ? null : new SizeValue(queueSize));
    return new ThreadPool.ExecutorHolder(executor, info);
}
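EsExecutors.newFixed is Elasticsearch's own factory for a fixed-size pool with a bounded work queue. With the plain JDK, a pool with the same general shape would be built roughly like this (a sketch under that assumption, not the Elasticsearch implementation):

import java.util.concurrent.*;

public class FixedPoolSketch {
    static ExecutorService newFixed(int size, int queueSize, ThreadFactory threadFactory) {
        // corePoolSize == maximumPoolSize and a bounded queue give a fixed pool
        // that rejects work (AbortPolicy) once the queue fills up.
        return new ThreadPoolExecutor(
                size, size,
                0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<>(queueSize),
                threadFactory,
                new ThreadPoolExecutor.AbortPolicy());
    }

    public static void main(String[] args) {
        ExecutorService executor = newFixed(4, 100, Executors.defaultThreadFactory());
        executor.submit(() -> System.out.println("running on " + Thread.currentThread().getName()));
        executor.shutdown();
    }
}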
Use of java.util.concurrent.ExecutorService in project elasticsearch by elastic.
From the class UnicastZenPing, the method resolveHostsLists:
/**
 * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of addresses
 * if the number of ports is greater than one) and the transport addresses are used to create discovery nodes. Host lookups are done
 * in parallel using the specified executor service, up to the specified resolve timeout.
 *
 * @param executorService  the executor service used to parallelize hostname lookups
 * @param logger           logger used for logging messages regarding hostname lookups
 * @param hosts            the hosts to resolve
 * @param limitPortCounts  the number of ports to resolve (should be 1 for non-local transport)
 * @param transportService the transport service
 * @param nodeId_prefix    a prefix to use for node ids
 * @param resolveTimeout   the timeout before returning from hostname lookups
 * @return a list of discovery nodes with resolved transport addresses
 */
public static List<DiscoveryNode> resolveHostsLists(final ExecutorService executorService, final Logger logger, final List<String> hosts, final int limitPortCounts, final TransportService transportService, final String nodeId_prefix, final TimeValue resolveTimeout) throws InterruptedException {
    Objects.requireNonNull(executorService);
    Objects.requireNonNull(logger);
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(transportService);
    Objects.requireNonNull(nodeId_prefix);
    Objects.requireNonNull(resolveTimeout);
    if (resolveTimeout.nanos() < 0) {
        throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
    }
    // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
    final List<Callable<TransportAddress[]>> callables = hosts.stream().map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts)).collect(Collectors.toList());
    final List<Future<TransportAddress[]>> futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);
    final List<DiscoveryNode> discoveryNodes = new ArrayList<>();
    final Set<TransportAddress> localAddresses = new HashSet<>();
    localAddresses.add(transportService.boundAddress().publishAddress());
    localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
    // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can associate the
    // hostname with the corresponding task by iterating together
    final Iterator<String> it = hosts.iterator();
    for (final Future<TransportAddress[]> future : futures) {
        final String hostname = it.next();
        if (!future.isCancelled()) {
            assert future.isDone();
            try {
                final TransportAddress[] addresses = future.get();
                logger.trace("resolved host [{}] to {}", hostname, addresses);
                for (int addressId = 0; addressId < addresses.length; addressId++) {
                    final TransportAddress address = addresses[addressId];
                    // no point in pinging ourselves
                    if (localAddresses.contains(address) == false) {
                        discoveryNodes.add(new DiscoveryNode(nodeId_prefix + hostname + "_" + addressId + "#", address, emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()));
                    }
                }
            } catch (final ExecutionException e) {
                assert e.getCause() != null;
                final String message = "failed to resolve host [" + hostname + "]";
                logger.warn(message, e.getCause());
            }
        } else {
            logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
        }
    }
    return discoveryNodes;
}
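The ExecutorService call that drives this method is invokeAll(tasks, timeout, unit): it blocks for at most the timeout, returns one Future per task in the same order as the task list (the property the comment above relies on), and cancels any task still running. A stripped-down sketch of that contract, with an illustrative stand-in for the DNS lookup:

import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;

public class InvokeAllSketch {
    public static void main(String[] args) throws InterruptedException {
        List<String> hosts = Arrays.asList("host-a", "host-b", "host-c");
        ExecutorService executor = Executors.newFixedThreadPool(hosts.size());
        List<Callable<String>> tasks = hosts.stream()
                .map(h -> (Callable<String>) () -> "resolved:" + h)   // stand-in for a hostname lookup
                .collect(Collectors.toList());
        // Futures come back in the same order as 'tasks'; tasks still running
        // after the timeout are cancelled, so check isCancelled() before get().
        List<Future<String>> futures = executor.invokeAll(tasks, 5, TimeUnit.SECONDS);
        Iterator<String> it = hosts.iterator();
        for (Future<String> future : futures) {
            String host = it.next();
            if (future.isCancelled()) {
                System.out.println("timed out resolving " + host);
            } else {
                try {
                    System.out.println(host + " -> " + future.get());
                } catch (ExecutionException e) {
                    System.out.println("failed to resolve " + host + ": " + e.getCause());
                }
            }
        }
        executor.shutdown();
    }
}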