use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
the class Netty4HttpRequestSizeLimitIT method testLimitsInFlightRequests.
public void testLimitsInFlightRequests() throws Exception {
    ensureGreen();
    // we use the limit size as a (very) rough indication of how many requests we should send to hit the limit
    int numRequests = LIMIT.bytesAsInt() / 100;
    StringBuilder bulkRequest = new StringBuilder();
    for (int i = 0; i < numRequests; i++) {
        bulkRequest.append("{\"index\": {}}");
        bulkRequest.append(System.lineSeparator());
        bulkRequest.append("{ \"field\" : \"value\" }");
        bulkRequest.append(System.lineSeparator());
    }
    @SuppressWarnings("unchecked")
    Tuple<String, CharSequence>[] requests = new Tuple[150];
    for (int i = 0; i < requests.length; i++) {
        requests[i] = Tuple.tuple("/index/type/_bulk", bulkRequest);
    }
    HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
    TransportAddress transportAddress = (TransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
    try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
        // a single bulk request must be accepted
        Collection<FullHttpResponse> singleResponse = nettyHttpClient.post(transportAddress.address(), requests[0]);
        assertThat(singleResponse, hasSize(1));
        assertAtLeastOnceExpectedStatus(singleResponse, HttpResponseStatus.OK);
        // sending all requests at once must trip the in-flight request limit on at least one of them
        Collection<FullHttpResponse> multipleResponses = nettyHttpClient.post(transportAddress.address(), requests);
        assertThat(multipleResponses, hasSize(requests.length));
        assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.SERVICE_UNAVAILABLE);
    }
}
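The assertAtLeastOnceExpectedStatus helper used above is not shown in this snippet. A minimal sketch of what such an assertion could look like, assuming Hamcrest matchers and the Netty 4.1 FullHttpResponse API; the actual helper in Netty4HttpRequestSizeLimitIT may differ:

// hypothetical sketch; requires a static import of org.hamcrest.Matchers.greaterThanOrEqualTo
private void assertAtLeastOnceExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) {
    // pass if at least one response in the collection carries the expected status
    long matching = responses.stream().filter(response -> response.status().equals(expectedStatus)).count();
    assertThat("expected at least one response with status " + expectedStatus, matching, greaterThanOrEqualTo(1L));
}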
use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
the class Netty4HttpServerPipeliningTests method testThatHttpPipeliningWorksWhenEnabled.
public void testThatHttpPipeliningWorksWhenEnabled() throws Exception {
    final Settings settings = Settings.builder()
        .put("http.pipelining", true)
        .put("http.port", "0")
        .build();
    try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) {
        httpServerTransport.start();
        final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
        final int numberOfRequests = randomIntBetween(4, 16);
        final List<String> requests = new ArrayList<>(numberOfRequests);
        for (int i = 0; i < numberOfRequests; i++) {
            if (rarely()) {
                requests.add("/slow/" + i);
            } else {
                requests.add("/" + i);
            }
        }
        try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
            Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[] {}));
            Collection<String> responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses);
            // with pipelining enabled, responses must come back in request order
            assertThat(responseBodies, contains(requests.toArray()));
        }
    }
}
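Netty4HttpClient.returnHttpResponseBodies is referenced but not shown. A minimal sketch of the idea, assuming Netty's ByteBuf content API; the real utility may differ:

// hypothetical sketch: decode each response body to a UTF-8 string, preserving iteration order
// requires java.nio.charset.StandardCharsets, java.util.ArrayList, java.util.List
static Collection<String> returnHttpResponseBodies(Collection<FullHttpResponse> responses) {
    List<String> bodies = new ArrayList<>(responses.size());
    for (FullHttpResponse response : responses) {
        bodies.add(response.content().toString(StandardCharsets.UTF_8));
    }
    return bodies;
}

Order matters here: the contains matcher asserts that, with pipelining enabled, the bodies come back in exactly the order the requests were sent, even though the /slow/ endpoints take longer to answer.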
use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
the class AzureUnicastHostsProvider method buildDynamicNodes.
/**
 * We build the list of nodes from the Azure Management API.
 * Information can be cached using the `cloud.azure.refresh_interval` property if needed.
 * Setting `cloud.azure.refresh_interval` to `-1` will cause infinite caching.
 * Setting `cloud.azure.refresh_interval` to `0` will disable caching (the default).
 */
@Override
public List<DiscoveryNode> buildDynamicNodes() {
    if (refreshInterval.millis() != 0) {
        if (cachedDiscoNodes != null &&
                (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
            logger.trace("using cache to retrieve node list");
            return cachedDiscoNodes;
        }
        lastRefresh = System.currentTimeMillis();
    }
    logger.debug("start building nodes list using Azure API");
    cachedDiscoNodes = new ArrayList<>();
    HostedServiceGetDetailedResponse detailed;
    try {
        detailed = azureComputeService.getServiceDetails();
    } catch (AzureServiceDisableException e) {
        logger.debug("Azure discovery service has been disabled. Returning empty list of nodes.");
        return cachedDiscoNodes;
    } catch (AzureServiceRemoteException e) {
        // we got a remote exception from the Azure API
        logger.warn("can not get list of azure nodes: [{}]. Returning empty list of nodes.", e.getMessage());
        logger.trace("AzureServiceRemoteException caught", e);
        return cachedDiscoNodes;
    }
    InetAddress ipAddress = null;
    try {
        ipAddress = networkService.resolvePublishHostAddresses(null);
        logger.trace("ip of current node: [{}]", ipAddress);
    } catch (IOException e) {
        // we could not resolve the publish host address; ipAddress stays null and we simply won't detect ourselves below
        logger.trace("exception while finding ip", e);
    }
    for (HostedServiceGetDetailedResponse.Deployment deployment : detailed.getDeployments()) {
        // we check the deployment slot
        if (deployment.getDeploymentSlot() != deploymentSlot) {
            logger.debug("current deployment slot [{}] for [{}] is different from [{}]. skipping...",
                deployment.getDeploymentSlot(), deployment.getName(), deploymentSlot);
            continue;
        }
        // if provided, we check the deployment name
        if (Strings.hasLength(deploymentName) && !deploymentName.equals(deployment.getName())) {
            logger.debug("current deployment name [{}] different from [{}]. skipping...",
                deployment.getName(), deploymentName);
            continue;
        }
        // we check the current deployment status
        if (deployment.getStatus() != DeploymentStatus.Starting &&
                deployment.getStatus() != DeploymentStatus.Deploying &&
                deployment.getStatus() != DeploymentStatus.Running) {
            logger.debug("[{}] status is [{}]. skipping...", deployment.getName(), deployment.getStatus());
            continue;
        }
        for (RoleInstance instance : deployment.getRoleInstances()) {
            String networkAddress = null;
            // let's detect whether we want to use the public or the private IP
            switch (hostType) {
                case PRIVATE_IP:
                    InetAddress privateIp = instance.getIPAddress();
                    if (privateIp != null) {
                        if (privateIp.equals(ipAddress)) {
                            logger.trace("adding ourselves {}", NetworkAddress.format(ipAddress));
                        }
                        networkAddress = InetAddresses.toUriString(privateIp);
                    } else {
                        logger.trace("no private ip provided. ignoring [{}]...", instance.getInstanceName());
                    }
                    break;
                case PUBLIC_IP:
                    for (InstanceEndpoint endpoint : instance.getInstanceEndpoints()) {
                        if (!publicEndpointName.equals(endpoint.getName())) {
                            logger.trace("ignoring endpoint [{}] as different than [{}]", endpoint.getName(), publicEndpointName);
                            continue;
                        }
                        networkAddress = NetworkAddress.format(new InetSocketAddress(endpoint.getVirtualIPAddress(), endpoint.getPort()));
                    }
                    if (networkAddress == null) {
                        logger.trace("no public ip provided. ignoring [{}]...", instance.getInstanceName());
                    }
                    break;
                default:
                    // this could never happen!
                    logger.warn("undefined host_type [{}]. Please check your settings.", hostType);
                    return cachedDiscoNodes;
            }
            if (networkAddress == null) {
                // we have a bad parameter here or not enough information from Azure
                logger.warn("no network address found. ignoring [{}]...", instance.getInstanceName());
                continue;
            }
            try {
                // we limit to one port per address; it makes no sense to ping 100 ports
                TransportAddress[] addresses = transportService.addressesFromString(networkAddress, 1);
                for (TransportAddress address : addresses) {
                    logger.trace("adding {}, transport_address {}", networkAddress, address);
                    cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getInstanceName(), address,
                        emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()));
                }
            } catch (Exception e) {
                logger.warn("can not convert [{}] to transport address. skipping. [{}]", networkAddress, e.getMessage());
            }
        }
    }
    logger.debug("{} node(s) added", cachedDiscoNodes.size());
    return cachedDiscoNodes;
}
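The guard at the top of buildDynamicNodes implements the contract from the Javadoc: a negative refresh interval caches forever, zero disables caching, and a positive value caches for that duration. A standalone sketch of the same pattern, with hypothetical names, to make the three cases explicit:

import java.util.List;
import java.util.function.Supplier;

// hypothetical illustration of the refresh-interval guard used in buildDynamicNodes
final class RefreshableCache<T> {
    private final long refreshIntervalMillis; // < 0: cache forever, 0: never cache, > 0: cache for this long
    private List<T> cached;
    private long lastRefresh;

    RefreshableCache(long refreshIntervalMillis) {
        this.refreshIntervalMillis = refreshIntervalMillis;
    }

    List<T> get(Supplier<List<T>> loader) {
        if (refreshIntervalMillis != 0 && cached != null
                && (refreshIntervalMillis < 0 || System.currentTimeMillis() - lastRefresh < refreshIntervalMillis)) {
            return cached; // cache hit: still fresh, or cached forever
        }
        lastRefresh = System.currentTimeMillis();
        cached = loader.get();
        return cached;
    }
}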
use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
the class Netty4TransportMultiPortIntegrationIT method testThatTransportClientCanConnect.
public void testThatTransportClientCanConnect() throws Exception {
    Settings settings = Settings.builder()
        .put("cluster.name", internalCluster().getClusterName())
        .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME)
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .build();
    try (TransportClient transportClient = new MockTransportClient(settings, Netty4Plugin.class)) {
        transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName("127.0.0.1"), randomPort));
        ClusterHealthResponse response = transportClient.admin().cluster().prepareHealth().get();
        assertThat(response.getStatus(), is(ClusterHealthStatus.GREEN));
    }
}
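Outside the test framework the connection pattern is the same; a minimal sketch against a locally running 5.x node, assuming the PreBuiltTransportClient from the transport client module, the default transport port 9300, and a hypothetical cluster name:

Settings settings = Settings.builder()
    .put("cluster.name", "my-cluster") // hypothetical cluster name
    .build();
try (TransportClient client = new PreBuiltTransportClient(settings)) {
    client.addTransportAddress(new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300));
    ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
    System.out.println("cluster status: " + health.getStatus());
}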
use of org.elasticsearch.common.transport.TransportAddress in project elasticsearch by elastic.
the class Netty4SizeHeaderFrameDecoderTests method startThreadPool.
@Before
public void startThreadPool() {
    threadPool = new ThreadPool(settings);
    NetworkService networkService = new NetworkService(settings, Collections.emptyList());
    BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
    nettyTransport = new Netty4Transport(settings, threadPool, networkService, bigArrays,
        new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService());
    nettyTransport.start();
    TransportAddress[] boundAddresses = nettyTransport.boundAddress().boundAddresses();
    TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses);
    port = transportAddress.address().getPort();
    host = transportAddress.address().getAddress();
}
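A transport and thread pool started in @Before also need to be torn down; a minimal sketch of a matching teardown, assuming the static ThreadPool.terminate utility (the actual test class may clean up differently):

// hypothetical matching teardown for the setup above
@After
public void stopThreadPool() throws Exception {
    nettyTransport.stop();
    ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
    threadPool = null;
}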