Use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
Class TransportClientNodesService, method addTransportAddresses:
public TransportClientNodesService addTransportAddresses(TransportAddress... transportAddresses) {
    synchronized (mutex) {
        if (closed) {
            throw new IllegalStateException("transport client is closed, can't add an address");
        }
        List<TransportAddress> filtered = new ArrayList<>(transportAddresses.length);
        for (TransportAddress transportAddress : transportAddresses) {
            boolean found = false;
            for (DiscoveryNode otherNode : listedNodes) {
                if (otherNode.getAddress().equals(transportAddress)) {
                    found = true;
                    logger.debug("address [{}] already exists with [{}], ignoring...", transportAddress, otherNode);
                    break;
                }
            }
            if (!found) {
                filtered.add(transportAddress);
            }
        }
        if (filtered.isEmpty()) {
            return this;
        }
        List<DiscoveryNode> builder = new ArrayList<>();
        builder.addAll(listedNodes());
        for (TransportAddress transportAddress : filtered) {
            DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeIdGenerator.incrementAndGet(),
                transportAddress, Collections.emptyMap(), Collections.emptySet(), minCompatibilityVersion);
            logger.debug("adding address [{}]", node);
            builder.add(node);
        }
        listedNodes = Collections.unmodifiableList(builder);
        nodesSampler.sample();
    }
    return this;
}
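
The method above is what runs under the hood when addresses are added through the public TransportClient API. The following is a minimal usage sketch, not taken from the Elasticsearch sources: it assumes a 5.x/6.x-era PreBuiltTransportClient on the classpath, a hypothetical cluster name "my-cluster", and a node listening on 127.0.0.1:9300.

import java.net.InetAddress;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class AddTransportAddressExample {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.builder()
            .put("cluster.name", "my-cluster") // hypothetical cluster name
            .build();
        try (TransportClient client = new PreBuiltTransportClient(settings)) {
            // delegates to TransportClientNodesService#addTransportAddresses shown above
            client.addTransportAddresses(new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300));
            // adding the same address again is ignored by the duplicate check in the method above
            client.addTransportAddresses(new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300));
        }
    }
}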
Use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
Class UnicastZenPing, method sendPings:
protected void sendPings(final TimeValue timeout, final PingingRound pingingRound) {
    final UnicastPingRequest pingRequest = new UnicastPingRequest();
    pingRequest.id = pingingRound.id();
    pingRequest.timeout = timeout;
    DiscoveryNodes discoNodes = contextProvider.nodes();
    pingRequest.pingResponse = createPingResponse(discoNodes);
    Set<DiscoveryNode> nodesFromResponses = temporalResponses.stream().map(pingResponse -> {
        assert clusterName.equals(pingResponse.clusterName()) :
            "got a ping request from a different cluster. expected " + clusterName + " got " + pingResponse.clusterName();
        return pingResponse.node();
    }).collect(Collectors.toSet());
    // dedup by address
    final Map<TransportAddress, DiscoveryNode> uniqueNodesByAddress =
        Stream.concat(pingingRound.getSeedNodes().stream(), nodesFromResponses.stream())
            .collect(Collectors.toMap(DiscoveryNode::getAddress, Function.identity(), (n1, n2) -> n1));
    // resolve what we can via the latest cluster state
    final Set<DiscoveryNode> nodesToPing = uniqueNodesByAddress.values().stream().map(node -> {
        DiscoveryNode foundNode = discoNodes.findByAddress(node.getAddress());
        if (foundNode == null) {
            return node;
        } else {
            return foundNode;
        }
    }).collect(Collectors.toSet());
    nodesToPing.forEach(node -> sendPingRequestToNode(node, timeout, pingingRound, pingRequest));
}
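
The interesting step above is the dedup by address: Collectors.toMap with a merge function keeps the first node seen for each transport address, so seed nodes (first in the concatenated stream) win over nodes learned from temporal ping responses. Below is a small self-contained sketch of that collector idiom with plain string pairs standing in for DiscoveryNode; it illustrates the stream technique only and is not Elasticsearch code.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class DedupByAddressExample {
    public static void main(String[] args) {
        // (address, origin) pairs; in sendPings the key is DiscoveryNode::getAddress
        List<String[]> nodes = Arrays.asList(
            new String[] { "10.0.0.1:9300", "seed" },
            new String[] { "10.0.0.1:9300", "ping-response" }, // same address: the first entry wins
            new String[] { "10.0.0.2:9300", "seed" });
        // merge function (n1, n2) -> n1 keeps the entry that was already in the map
        Map<String, String[]> uniqueByAddress = nodes.stream()
            .collect(Collectors.toMap(n -> n[0], Function.identity(), (n1, n2) -> n1));
        uniqueByAddress.values().forEach(n -> System.out.println(n[0] + " from " + n[1]));
    }
}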
Use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
Class UnicastZenPing, method resolveHostsLists:
/**
 * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of addresses
 * if the number of ports is greater than one) and the transport addresses are used to create discovery nodes. Host lookups are done
 * in parallel using the specified executor service, up to the specified resolve timeout.
 *
 * @param executorService  the executor service used to parallelize hostname lookups
 * @param logger           logger used for logging messages regarding hostname lookups
 * @param hosts            the hosts to resolve
 * @param limitPortCounts  the number of ports to resolve (should be 1 for non-local transport)
 * @param transportService the transport service
 * @param nodeId_prefix    a prefix to use for node ids
 * @param resolveTimeout   the timeout before returning from hostname lookups
 * @return a list of discovery nodes with resolved transport addresses
 */
public static List<DiscoveryNode> resolveHostsLists(final ExecutorService executorService, final Logger logger,
        final List<String> hosts, final int limitPortCounts, final TransportService transportService,
        final String nodeId_prefix, final TimeValue resolveTimeout) throws InterruptedException {
    Objects.requireNonNull(executorService);
    Objects.requireNonNull(logger);
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(transportService);
    Objects.requireNonNull(nodeId_prefix);
    Objects.requireNonNull(resolveTimeout);
    if (resolveTimeout.nanos() < 0) {
        throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
    }
    // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
    final List<Callable<TransportAddress[]>> callables = hosts.stream()
        .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts))
        .collect(Collectors.toList());
    final List<Future<TransportAddress[]>> futures =
        executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);
    final List<DiscoveryNode> discoveryNodes = new ArrayList<>();
    final Set<TransportAddress> localAddresses = new HashSet<>();
    localAddresses.add(transportService.boundAddress().publishAddress());
    localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
    // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can
    // associate the hostname with the corresponding task by iterating together
    final Iterator<String> it = hosts.iterator();
    for (final Future<TransportAddress[]> future : futures) {
        final String hostname = it.next();
        if (!future.isCancelled()) {
            assert future.isDone();
            try {
                final TransportAddress[] addresses = future.get();
                logger.trace("resolved host [{}] to {}", hostname, addresses);
                for (int addressId = 0; addressId < addresses.length; addressId++) {
                    final TransportAddress address = addresses[addressId];
                    // no point in pinging ourselves
                    if (localAddresses.contains(address) == false) {
                        discoveryNodes.add(new DiscoveryNode(nodeId_prefix + hostname + "_" + addressId + "#", address,
                            emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()));
                    }
                }
            } catch (final ExecutionException e) {
                assert e.getCause() != null;
                final String message = "failed to resolve host [" + hostname + "]";
                logger.warn(message, e.getCause());
            }
        } else {
            logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
        }
    }
    return discoveryNodes;
}
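
A hedged sketch of how the method above could be invoked. The method is public and static, so it can be called with any started TransportService; the transportService and logger variables are assumed to be in scope, and the host names and node id prefix are made up for illustration.

// assumes: TransportService transportService (already started) and org.apache.logging.log4j.Logger logger in scope
ExecutorService executor = Executors.newFixedThreadPool(4);
try {
    List<DiscoveryNode> seeds = UnicastZenPing.resolveHostsLists(
        executor,
        logger,
        Arrays.asList("es-master-1:9300", "es-master-2"), // hypothetical seed hosts
        1,                                                // limitPortCounts: 1 for non-local transport
        transportService,
        "unicast_host_",                                  // hypothetical node id prefix
        TimeValue.timeValueSeconds(5));
    seeds.forEach(node -> logger.info("resolved seed node at {}", node.getAddress()));
} finally {
    executor.shutdown();
}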
Use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
Class TransportClientNodesServiceTests, method checkRemoveAddress:
private void checkRemoveAddress(boolean sniff) {
    Object[] extraSettings = { TransportClient.CLIENT_TRANSPORT_SNIFF.getKey(), sniff };
    try (TestIteration iteration = new TestIteration(extraSettings)) {
        final TransportClientNodesService service = iteration.transportClientNodesService;
        assertEquals(iteration.listNodesCount + iteration.sniffNodesCount, service.connectedNodes().size());
        final TransportAddress addressToRemove = randomFrom(iteration.listNodeAddresses);
        service.removeTransportAddress(addressToRemove);
        assertThat(service.connectedNodes(), everyItem(not(new CustomMatcher<DiscoveryNode>("removed address") {
            @Override
            public boolean matches(Object item) {
                return item instanceof DiscoveryNode && ((DiscoveryNode) item).getAddress().equals(addressToRemove);
            }
        })));
        assertEquals(iteration.listNodesCount + iteration.sniffNodesCount - 1, service.connectedNodes().size());
    }
}
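
The test drives TransportClientNodesService directly through its test harness; at the public API level the equivalent operation is TransportClient#removeTransportAddress. A minimal sketch, assuming a connected TransportClient named client and an address it was previously given:

// the address should equal one passed to addTransportAddress(es) earlier; removing an unknown
// address leaves the listed nodes unchanged
TransportAddress stale = new TransportAddress(InetAddress.getByName("10.0.0.9"), 9300);
client.addTransportAddress(stale);
// ... later: drop the address again; nodes connected via it are closed and removed from the listed nodes
client.removeTransportAddress(stale);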
Use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
Class TransportClientRetryIT, method testRetry:
public void testRetry() throws IOException, ExecutionException, InterruptedException {
    Iterable<TransportService> instances = internalCluster().getInstances(TransportService.class);
    TransportAddress[] addresses = new TransportAddress[internalCluster().size()];
    int i = 0;
    for (TransportService instance : instances) {
        addresses[i++] = instance.boundAddress().publishAddress();
    }
    Settings.Builder builder = Settings.builder()
        .put("client.transport.nodes_sampler_interval", "1s")
        .put("node.name", "transport_client_retry_test")
        .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), internalCluster().getClusterName())
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir());
    try (TransportClient client = new MockTransportClient(builder.build())) {
        client.addTransportAddresses(addresses);
        assertEquals(client.connectedNodes().size(), internalCluster().size());
        int size = cluster().size();
        // kill all nodes one by one, leaving a single master/data node at the end of the loop
        for (int j = 1; j < size; j++) {
            internalCluster().stopRandomNode(input -> true);
            ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest().local(true);
            ClusterState clusterState;
            // use both variants of the execute method: with and without a listener
            if (randomBoolean()) {
                clusterState = client.admin().cluster().state(clusterStateRequest).get().getState();
            } else {
                PlainListenableActionFuture<ClusterStateResponse> future = new PlainListenableActionFuture<>(client.threadPool());
                client.admin().cluster().state(clusterStateRequest, future);
                clusterState = future.get().getState();
            }
            assertThat(clusterState.nodes().getSize(), greaterThanOrEqualTo(size - j));
            assertThat(client.connectedNodes().size(), greaterThanOrEqualTo(size - j));
        }
    }
}
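
The listener-based branch above uses PlainListenableActionFuture to turn the asynchronous call back into a blocking get. The same asynchronous variant can also be written with a plain ActionListener; a minimal sketch, assuming the same client as in the test:

client.admin().cluster().state(Requests.clusterStateRequest().local(true), new ActionListener<ClusterStateResponse>() {
    @Override
    public void onResponse(ClusterStateResponse response) {
        // called on a transport thread when the (possibly retried) request succeeds
        System.out.println("nodes seen: " + response.getState().nodes().getSize());
    }

    @Override
    public void onFailure(Exception e) {
        e.printStackTrace();
    }
});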