Use of java.util.concurrent.Future in project elasticsearch by elastic.
The class UnicastZenPing, method resolveHostsLists.
/**
 * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of addresses
 * if the number of ports is greater than one) and the transport addresses are used to create discovery nodes. Host lookups are done
 * in parallel using the specified executor service, up to the specified resolve timeout.
 *
 * @param executorService the executor service used to parallelize hostname lookups
 * @param logger logger used for logging messages regarding hostname lookups
 * @param hosts the hosts to resolve
 * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)
 * @param transportService the transport service
 * @param nodeId_prefix a prefix to use for node ids
 * @param resolveTimeout the timeout before returning from hostname lookups
 * @return a list of discovery nodes with resolved transport addresses
 */
public static List<DiscoveryNode> resolveHostsLists(
    final ExecutorService executorService,
    final Logger logger,
    final List<String> hosts,
    final int limitPortCounts,
    final TransportService transportService,
    final String nodeId_prefix,
    final TimeValue resolveTimeout) throws InterruptedException {
    Objects.requireNonNull(executorService);
    Objects.requireNonNull(logger);
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(transportService);
    Objects.requireNonNull(nodeId_prefix);
    Objects.requireNonNull(resolveTimeout);
    if (resolveTimeout.nanos() < 0) {
        throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
    }
    // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
    final List<Callable<TransportAddress[]>> callables = hosts
        .stream()
        .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts))
        .collect(Collectors.toList());
    final List<Future<TransportAddress[]>> futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);
    final List<DiscoveryNode> discoveryNodes = new ArrayList<>();
    final Set<TransportAddress> localAddresses = new HashSet<>();
    localAddresses.add(transportService.boundAddress().publishAddress());
    localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
    // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can
    // associate the hostname with the corresponding task by iterating together
    final Iterator<String> it = hosts.iterator();
    for (final Future<TransportAddress[]> future : futures) {
        final String hostname = it.next();
        if (!future.isCancelled()) {
            assert future.isDone();
            try {
                final TransportAddress[] addresses = future.get();
                logger.trace("resolved host [{}] to {}", hostname, addresses);
                for (int addressId = 0; addressId < addresses.length; addressId++) {
                    final TransportAddress address = addresses[addressId];
                    // no point in pinging ourselves
                    if (localAddresses.contains(address) == false) {
                        discoveryNodes.add(
                            new DiscoveryNode(
                                nodeId_prefix + hostname + "_" + addressId + "#",
                                address,
                                emptyMap(),
                                emptySet(),
                                Version.CURRENT.minimumCompatibilityVersion()));
                    }
                }
            } catch (final ExecutionException e) {
                assert e.getCause() != null;
                final String message = "failed to resolve host [" + hostname + "]";
                logger.warn(message, e.getCause());
            }
        } else {
            logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
        }
    }
    return discoveryNodes;
}
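The method above leans on two properties of ExecutorService#invokeAll: futures come back in the iteration order of the submitted tasks, and tasks that do not finish within the timeout are cancelled rather than left running unchecked. A minimal, self-contained sketch of the same pattern, with hypothetical host names and InetAddress lookups standing in for Elasticsearch's TransportService, might look like this:

import java.net.InetAddress;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public class BoundedResolveSketch {

    public static void main(String[] args) throws InterruptedException {
        final List<String> hosts = Arrays.asList("localhost", "example.org"); // hypothetical inputs
        final ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            // one Callable per host, submitted as a batch with an overall timeout
            final List<Callable<InetAddress[]>> tasks = hosts.stream()
                .map(h -> (Callable<InetAddress[]>) () -> InetAddress.getAllByName(h))
                .collect(Collectors.toList());
            final List<Future<InetAddress[]>> futures = executor.invokeAll(tasks, 5, TimeUnit.SECONDS);
            // invokeAll returns futures in the same order as the task list, so index i pairs with hosts.get(i)
            for (int i = 0; i < futures.size(); i++) {
                final Future<InetAddress[]> future = futures.get(i);
                if (future.isCancelled()) {
                    System.out.println("timed out resolving " + hosts.get(i));
                    continue;
                }
                try {
                    System.out.println(hosts.get(i) + " -> " + Arrays.toString(future.get()));
                } catch (ExecutionException e) {
                    System.out.println("failed to resolve " + hosts.get(i) + ": " + e.getCause());
                }
            }
        } finally {
            executor.shutdown();
        }
    }
}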
Use of java.util.concurrent.Future in project sonarqube by SonarSource.
The class StopperThread, method run.
@Override
public void run() {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    // run the (potentially blocking) stop on a separate thread so the wait can be bounded by a timeout
    Future future = executor.submit(new Runnable() {
        @Override
        public void run() {
            monitored.stop();
        }
    });
    try {
        // wait at most terminationTimeout milliseconds for the stop to complete
        future.get(terminationTimeout, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        LoggerFactory.getLogger(getClass()).error(String.format("Can not stop in %dms", terminationTimeout), e);
    }
    executor.shutdownNow();
    commands.endWatch();
}
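The same idea, bounding a stop/cleanup call with Future.get(timeout) and then discarding the worker with shutdownNow, can be sketched in isolation. The Stoppable interface, the method name stopWithTimeout, and the 5-second timeout below are assumptions for illustration, not part of the SonarQube code:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedStopSketch {

    // hypothetical stand-in for the "monitored" process being stopped
    interface Stoppable {
        void stop();
    }

    static void stopWithTimeout(Stoppable target, long timeoutMs) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<?> future = executor.submit(target::stop);
        try {
            // block at most timeoutMs; a graceful stop that hangs must not block the caller forever
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            System.err.println("stop did not finish within " + timeoutMs + "ms, giving up");
        } catch (Exception e) {
            System.err.println("stop failed: " + e);
        } finally {
            // interrupt the worker thread if it is still busy and release the executor
            executor.shutdownNow();
        }
    }

    public static void main(String[] args) {
        stopWithTimeout(() -> {
            try {
                Thread.sleep(10_000); // simulate a stop that takes too long
            } catch (InterruptedException ignored) {
            }
        }, 5_000);
    }
}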
Use of java.util.concurrent.Future in project sonarqube by SonarSource.
The class BaseIndexer, method index.
public void index(final IndexerTask task) {
    final long requestedAt = system2.now();
    Future submit = executor.submit(() -> {
        if (lastUpdatedAt == -1L) {
            lastUpdatedAt = esClient.getMaxFieldValue(indexType, dateFieldName);
        }
        if (requestedAt > lastUpdatedAt) {
            long l = task.index(lastUpdatedAt);
            // l can be 0 if no documents were indexed
            lastUpdatedAt = Math.max(l, lastUpdatedAt);
        }
    });
    try {
        Uninterruptibles.getUninterruptibly(submit);
    } catch (ExecutionException e) {
        Throwables.propagate(e);
    }
}
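Here the caller blocks on the submitted task with Guava's Uninterruptibles.getUninterruptibly, which keeps waiting even if the calling thread is interrupted and restores the interrupt flag before returning, so only ExecutionException needs handling. A minimal sketch of that behaviour, with a made-up slow task standing in for the indexing work:

import com.google.common.util.concurrent.Uninterruptibles;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class UninterruptibleWaitSketch {

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(() -> {
            TimeUnit.SECONDS.sleep(2); // simulate slow indexing work
            return "indexed";
        });
        try {
            // unlike future.get(), this never throws InterruptedException; it keeps waiting
            // and re-asserts the thread's interrupt status once the result is available
            String result = Uninterruptibles.getUninterruptibly(future);
            System.out.println(result + ", interrupted=" + Thread.currentThread().isInterrupted());
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause());
        } finally {
            executor.shutdown();
        }
    }
}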
Use of java.util.concurrent.Future in project disruptor by LMAX-Exchange.
The class ThreeToOneSequencedThroughputTest, method runDisruptorPass.
@Override
protected long runDisruptorPass() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    handler.reset(latch, batchEventProcessor.getSequence().get() + ((ITERATIONS / NUM_PUBLISHERS) * NUM_PUBLISHERS));
    Future<?>[] futures = new Future[NUM_PUBLISHERS];
    for (int i = 0; i < NUM_PUBLISHERS; i++) {
        futures[i] = executor.submit(valuePublishers[i]);
    }
    executor.submit(batchEventProcessor);
    long start = System.currentTimeMillis();
    cyclicBarrier.await();
    for (int i = 0; i < NUM_PUBLISHERS; i++) {
        futures[i].get();
    }
    latch.await();
    long opsPerSecond = (ITERATIONS * 1000L) / (System.currentTimeMillis() - start);
    batchEventProcessor.halt();
    return opsPerSecond;
}
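The benchmark uses a common multi-publisher harness: every publisher task blocks on a CyclicBarrier so they all start together, the main thread joins on each Future to know the publishers have finished (and to surface any failure), and a CountDownLatch signals that the consumer has processed the last event. A stripped-down sketch of that coordination, with a plain atomic counter standing in for the Disruptor ring buffer, might look like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;

public class MultiPublisherHarnessSketch {

    static final int NUM_PUBLISHERS = 3;
    static final long ITERATIONS = 1_000_000L;

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newCachedThreadPool();
        // one extra party so the main thread releases all publishers at once
        CyclicBarrier startBarrier = new CyclicBarrier(NUM_PUBLISHERS + 1);
        CountDownLatch done = new CountDownLatch(NUM_PUBLISHERS);
        AtomicLong published = new AtomicLong(); // stand-in for the ring buffer

        Future<?>[] futures = new Future[NUM_PUBLISHERS];
        for (int i = 0; i < NUM_PUBLISHERS; i++) {
            futures[i] = executor.submit(() -> {
                try {
                    startBarrier.await(); // all publishers start together
                    for (long j = 0; j < ITERATIONS / NUM_PUBLISHERS; j++) {
                        published.incrementAndGet();
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                } finally {
                    done.countDown();
                }
            });
        }

        long start = System.currentTimeMillis();
        startBarrier.await();          // release the publishers
        for (Future<?> future : futures) {
            future.get();              // wait for each publisher and propagate failures
        }
        done.await();
        long elapsed = Math.max(1, System.currentTimeMillis() - start);
        System.out.println(published.get() * 1000L / elapsed + " ops/sec");
        executor.shutdown();
    }
}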
Use of java.util.concurrent.Future in project disruptor by LMAX-Exchange.
The class ThreeToThreeSequencedThroughputTest, method runDisruptorPass.
@Override
protected long runDisruptorPass() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    handler.reset(latch, ITERATIONS);
    Future<?>[] futures = new Future[NUM_PUBLISHERS];
    for (int i = 0; i < NUM_PUBLISHERS; i++) {
        futures[i] = executor.submit(valuePublishers[i]);
    }
    executor.submit(batchEventProcessor);
    long start = System.currentTimeMillis();
    cyclicBarrier.await();
    for (int i = 0; i < NUM_PUBLISHERS; i++) {
        futures[i].get();
    }
    latch.await();
    long opsPerSecond = (ITERATIONS * 1000L * ARRAY_SIZE) / (System.currentTimeMillis() - start);
    batchEventProcessor.halt();
    return opsPerSecond;
}
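This variant uses the same Future-based join as the three-to-one test above; the visible difference is the throughput formula, which multiplies by ARRAY_SIZE, presumably because each published event carries an array of ARRAY_SIZE values rather than a single value.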