Use of java.util.concurrent.Callable in project jetty.project by eclipse.
Class WebSocketScopeSessionTest, method testMultiSession_Overlapping.
@Test
public void testMultiSession_Overlapping() throws Exception {
    final CountDownLatch midLatch = new CountDownLatch(2);
    final CountDownLatch end1Latch = new CountDownLatch(1);
    Callable<Session> call1 = new Callable<Session>() {
        @Override
        public Session call() throws Exception {
            Session ret = null;
            ScopedInstance<WebSocketScopeContext> wsScope1Bean = newInstance(WebSocketScopeContext.class);
            WebSocketScopeContext wsScope1 = wsScope1Bean.instance;
            wsScope1.create();
            try {
                // Scope 1
                wsScope1.begin();
                BogusSession sess = new BogusSession("1");
                wsScope1.setSession(sess);
                midLatch.countDown();
                midLatch.await(1, TimeUnit.SECONDS);
                ScopedInstance<BogusSocket> sock1Bean = newInstance(BogusSocket.class);
                BogusSocket sock1 = sock1Bean.instance;
                assertThat("Socket 1 Session", sock1.getSession(), sameInstance((Session) sess));
                ret = sock1.getSession();
                sock1Bean.destroy();
            } finally {
                wsScope1.end();
            }
            wsScope1.destroy();
            wsScope1Bean.destroy();
            end1Latch.countDown();
            return ret;
        }
    };
    final CountDownLatch end2Latch = new CountDownLatch(1);
    Callable<Session> call2 = new Callable<Session>() {
        @Override
        public Session call() throws Exception {
            Session ret = null;
            ScopedInstance<WebSocketScopeContext> wsScope2Bean = newInstance(WebSocketScopeContext.class);
            WebSocketScopeContext wsScope2 = wsScope2Bean.instance;
            wsScope2.create();
            try {
                // Scope 2
                wsScope2.begin();
                BogusSession sess = new BogusSession("2");
                wsScope2.setSession(sess);
                ScopedInstance<BogusSocket> sock2Bean = newInstance(BogusSocket.class);
                midLatch.countDown();
                midLatch.await(1, TimeUnit.SECONDS);
                BogusSocket sock2 = sock2Bean.instance;
                ret = sock2.getSession();
                assertThat("Socket 2 Session", sock2.getSession(), sameInstance((Session) sess));
                sock2Bean.destroy();
            } finally {
                wsScope2.end();
            }
            wsScope2.destroy();
            wsScope2Bean.destroy();
            end2Latch.countDown();
            return ret;
        }
    };
    ExecutorService svc = Executors.newFixedThreadPool(4);
    Future<Session> fut1 = svc.submit(call1);
    Future<Session> fut2 = svc.submit(call2);
    Session sess1 = fut1.get(1, TimeUnit.SECONDS);
    Session sess2 = fut2.get(1, TimeUnit.SECONDS);
    assertThat("Sessions are different", sess1, not(sameInstance(sess2)));
}
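The test above relies on midLatch to guarantee that both Callables are in flight at the same time before either one asserts that its scope still holds its own session. A minimal, self-contained sketch of that overlap idiom, with a ThreadLocal standing in for the WebSocket scope (all names here are illustrative, not from jetty.project):

import java.util.concurrent.*;

public class OverlapSketch {
    static final ThreadLocal<String> SCOPE = new ThreadLocal<>();

    public static void main(String[] args) throws Exception {
        final CountDownLatch mid = new CountDownLatch(2);
        Callable<String> task = () -> {
            // Each task installs its own value in a thread-confined "scope".
            SCOPE.set(Thread.currentThread().getName());
            mid.countDown();
            mid.await(1, TimeUnit.SECONDS); // both tasks are now in flight together
            return SCOPE.get();             // each task still sees only its own value
        };
        ExecutorService svc = Executors.newFixedThreadPool(2);
        Future<String> f1 = svc.submit(task);
        Future<String> f2 = svc.submit(task);
        System.out.println(f1.get(1, TimeUnit.SECONDS).equals(f2.get(1, TimeUnit.SECONDS))
                ? "scope leaked between tasks" : "scopes are isolated");
        svc.shutdown();
    }
}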
Use of java.util.concurrent.Callable in project druid by druid-io.
Class LoadingCacheTest, method testInvalidateAll.
@Test
public void testInvalidateAll() throws ExecutionException {
    loadingCache.get("key2", new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            return "value2";
        }
    });
    Assert.assertEquals("value2", loadingCache.getIfPresent("key2"));
    loadingCache.invalidateAll(Lists.newArrayList("key2"));
    Assert.assertEquals(null, loadingCache.getIfPresent("key2"));
}
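druid's LoadingCache is exercised here through the get(key, loader) idiom: the Callable runs only when the key is absent, and invalidateAll removes the cached value again. The same contract can be tried out against Guava's com.google.common.cache.Cache, which exposes get(K, Callable), getIfPresent, and invalidateAll(Iterable); whether druid's interface mirrors Guava exactly is an assumption here, and this sketch uses Guava directly rather than the druid class under test:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;

public class CacheSketch {
    public static void main(String[] args) throws Exception {
        Cache<String, Object> cache = CacheBuilder.newBuilder().build();
        // The loader Callable runs only on a cache miss.
        System.out.println(cache.get("key2", () -> "value2")); // value2 (loaded)
        System.out.println(cache.getIfPresent("key2"));        // value2 (cached)
        cache.invalidateAll(Lists.newArrayList("key2"));
        System.out.println(cache.getIfPresent("key2"));        // null (invalidated)
    }
}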
Use of java.util.concurrent.Callable in project elasticsearch by elastic.
Class UnicastZenPing, method resolveHostsLists.
/**
 * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of
 * addresses if the number of ports is greater than one) and the transport addresses are used to create discovery nodes. Host
 * lookups are done in parallel using the specified executor service, up to the specified resolve timeout.
 *
 * @param executorService  the executor service used to parallelize hostname lookups
 * @param logger           logger used for logging messages regarding hostname lookups
 * @param hosts            the hosts to resolve
 * @param limitPortCounts  the number of ports to resolve (should be 1 for non-local transport)
 * @param transportService the transport service
 * @param nodeId_prefix    a prefix to use for node ids
 * @param resolveTimeout   the timeout before returning from hostname lookups
 * @return a list of discovery nodes with resolved transport addresses
 */
public static List<DiscoveryNode> resolveHostsLists(
        final ExecutorService executorService,
        final Logger logger,
        final List<String> hosts,
        final int limitPortCounts,
        final TransportService transportService,
        final String nodeId_prefix,
        final TimeValue resolveTimeout) throws InterruptedException {
    Objects.requireNonNull(executorService);
    Objects.requireNonNull(logger);
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(transportService);
    Objects.requireNonNull(nodeId_prefix);
    Objects.requireNonNull(resolveTimeout);
    if (resolveTimeout.nanos() < 0) {
        throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
    }
    // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
    final List<Callable<TransportAddress[]>> callables = hosts
            .stream()
            .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts))
            .collect(Collectors.toList());
    final List<Future<TransportAddress[]>> futures =
            executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);
    final List<DiscoveryNode> discoveryNodes = new ArrayList<>();
    final Set<TransportAddress> localAddresses = new HashSet<>();
    localAddresses.add(transportService.boundAddress().publishAddress());
    localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
    // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can
    // associate the hostname with the corresponding task by iterating together
    final Iterator<String> it = hosts.iterator();
    for (final Future<TransportAddress[]> future : futures) {
        final String hostname = it.next();
        if (!future.isCancelled()) {
            assert future.isDone();
            try {
                final TransportAddress[] addresses = future.get();
                logger.trace("resolved host [{}] to {}", hostname, addresses);
                for (int addressId = 0; addressId < addresses.length; addressId++) {
                    final TransportAddress address = addresses[addressId];
                    // no point in pinging ourselves
                    if (localAddresses.contains(address) == false) {
                        discoveryNodes.add(new DiscoveryNode(
                                nodeId_prefix + hostname + "_" + addressId + "#",
                                address,
                                emptyMap(),
                                emptySet(),
                                Version.CURRENT.minimumCompatibilityVersion()));
                    }
                }
            } catch (final ExecutionException e) {
                assert e.getCause() != null;
                final String message = "failed to resolve host [" + hostname + "]";
                logger.warn(message, e.getCause());
            }
        } else {
            logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
        }
    }
    return discoveryNodes;
}
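The timeout handling above leans on the ExecutorService contract: invokeAll(tasks, timeout, unit) returns once every task has completed or the timeout has elapsed, cancelling tasks that did not finish, and the returned futures are in the same order as the submitted tasks. A stripped-down sketch of that pattern, with a deliberately slow task standing in for a hung DNS lookup (hostnames and delays are made up for illustration):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.*;
import java.util.stream.Collectors;

public class InvokeAllSketch {
    public static void main(String[] args) throws InterruptedException {
        final ExecutorService svc = Executors.newFixedThreadPool(2);
        final List<String> hosts = Arrays.asList("fast.example", "slow.example");
        final List<Callable<String>> tasks = hosts.stream()
                .map(host -> (Callable<String>) () -> {
                    if (host.startsWith("slow")) {
                        Thread.sleep(5000); // simulate a lookup that never comes back in time
                    }
                    return host + " -> 10.0.0.1";
                })
                .collect(Collectors.toList());
        // Futures come back in task order, so hosts and futures can be zipped by index.
        final List<Future<String>> futures = svc.invokeAll(tasks, 1, TimeUnit.SECONDS);
        for (int i = 0; i < futures.size(); i++) {
            final Future<String> future = futures.get(i);
            if (future.isCancelled()) {
                System.out.println("timed out resolving [" + hosts.get(i) + "]");
            } else {
                try {
                    System.out.println(future.get());
                } catch (ExecutionException e) {
                    System.out.println("failed to resolve [" + hosts.get(i) + "]: " + e.getCause());
                }
            }
        }
        svc.shutdown();
    }
}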
Use of java.util.concurrent.Callable in project hadoop by apache.
Class TestInMemorySCMStore, method testAddResourceRefAddResourceConcurrency.
@Test
public void testAddResourceRefAddResourceConcurrency() throws Exception {
    startEmptyStore();
    final String key = "key1";
    final String fileName = "foo.jar";
    final String user = "user";
    final ApplicationId id = createAppId(1, 1L);
    // add the resource and add the resource ref at the same time
    ExecutorService exec = HadoopExecutors.newFixedThreadPool(2);
    final CountDownLatch start = new CountDownLatch(1);
    Callable<String> addKeyTask = new Callable<String>() {
        public String call() throws Exception {
            start.await();
            return store.addResource(key, fileName);
        }
    };
    Callable<String> addAppIdTask = new Callable<String>() {
        public String call() throws Exception {
            start.await();
            return store.addResourceReference(key, new SharedCacheResourceReference(id, user));
        }
    };
    Future<String> addAppIdFuture = exec.submit(addAppIdTask);
    Future<String> addKeyFuture = exec.submit(addKeyTask);
    // start them at the same time
    start.countDown();
    // get the results
    String addKeyResult = addKeyFuture.get();
    String addAppIdResult = addAppIdFuture.get();
    assertEquals(fileName, addKeyResult);
    System.out.println("addAppId() result: " + addAppIdResult);
    // it may be null or the fileName depending on the timing
    assertTrue(addAppIdResult == null || addAppIdResult.equals(fileName));
    exec.shutdown();
}
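The single CountDownLatch acts as a start gate: both Callables park on start.await() until the test thread calls start.countDown(), which maximizes the chance that addResource and addResourceReference actually race. The gate generalizes to any number of tasks; a minimal sketch (names illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

public class StartGateSketch {
    public static void main(String[] args) throws Exception {
        final CountDownLatch start = new CountDownLatch(1);
        final ExecutorService exec = Executors.newFixedThreadPool(4);
        final List<Future<Long>> futures = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            futures.add(exec.submit((Callable<Long>) () -> {
                start.await();            // park until the gate opens
                return System.nanoTime(); // all tasks resume at nearly the same instant
            }));
        }
        Thread.sleep(100);  // give every worker time to reach the gate
        start.countDown();  // release all of them at once
        for (Future<Long> f : futures) {
            System.out.println(f.get());
        }
        exec.shutdown();
    }
}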
Use of java.util.concurrent.Callable in project hbase by apache.
Class AssignmentManager, method onRegionSplit.
private String onRegionSplit(final RegionState current, final HRegionInfo hri, final ServerName serverName,
        final RegionStateTransition transition) {
    // it could be a reportRegionTransition RPC retry.
    if (current == null || !current.isSplittingOrSplitOnServer(serverName)) {
        return hri.getShortNameToLog() + " is not splitting on " + serverName;
    }
    // Just return in case of retrying
    if (current.isSplit()) {
        return null;
    }
    final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1));
    final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2));
    RegionState rs_a = regionStates.getRegionState(a);
    RegionState rs_b = regionStates.getRegionState(b);
    if (rs_a == null || !rs_a.isSplittingNewOnServer(serverName)
            || rs_b == null || !rs_b.isSplittingNewOnServer(serverName)) {
        return "Some daughter is not known to be splitting on " + serverName + ", a=" + rs_a + ", b=" + rs_b;
    }
    if (TEST_SKIP_SPLIT_HANDLING) {
        return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set";
    }
    regionOffline(hri, State.SPLIT);
    regionOnline(a, serverName, 1);
    regionOnline(b, serverName, 1);
    // User could disable the table before master knows the new region.
    if (getTableStateManager().isTableState(hri.getTable(), TableState.State.DISABLED, TableState.State.DISABLING)) {
        invokeUnAssign(a);
        invokeUnAssign(b);
    } else {
        Callable<Object> splitReplicasCallable = new Callable<Object>() {
            @Override
            public Object call() {
                doSplittingOfReplicas(hri, a, b);
                return null;
            }
        };
        threadPoolExecutorService.submit(splitReplicasCallable);
    }
    return null;
}
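Note that the Future returned by threadPoolExecutorService.submit(splitReplicasCallable) is discarded, so an exception thrown inside doSplittingOfReplicas would be captured in that Future and never surface. A common defensive variant is to catch and log inside the Callable itself; a hedged sketch (the task body and logging are illustrative, not hbase code):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FireAndForgetSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newCachedThreadPool();
        pool.submit((Callable<Object>) () -> {
            try {
                riskyBackgroundWork();
            } catch (Throwable t) {
                // With the Future discarded, this is the only place the failure becomes visible.
                System.err.println("background task failed: " + t);
            }
            return null;
        });
        pool.shutdown();
    }

    static void riskyBackgroundWork() {
        throw new IllegalStateException("boom");
    }
}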