use of org.apache.http.HttpHost in project elasticsearch by elastic.
the class RestHighLevelClientTests method testWrapResponseListenerOnResponseExceptionWithIgnores.
public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOException {
    TrackingActionListener trackingActionListener = new TrackingActionListener();
    ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
            response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.singleton(404));
    HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
    Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
    ResponseException responseException = new ResponseException(response);
    responseListener.onFailure(responseException);
    //although we got an exception, we turn it into a successful response because the status code was provided among ignores
    assertNull(trackingActionListener.exception.get());
    assertEquals(404, trackingActionListener.statusCode.get());
}
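TrackingActionListener itself is defined elsewhere in the test class and is not shown here. A minimal sketch of such a listener, assuming it only needs to record the first status code or exception it receives in atomic fields (which is what the assertions above read back), could look like this:

//minimal sketch, not the actual test helper; needs java.util.concurrent.atomic.* and org.elasticsearch.action.ActionListener
private static class TrackingActionListener implements ActionListener<Integer> {
    final AtomicInteger statusCode = new AtomicInteger(-1);
    final AtomicReference<Exception> exception = new AtomicReference<>();

    @Override
    public void onResponse(Integer status) {
        //the converter passed to wrapResponseListener maps the Response to its status code
        statusCode.set(status);
    }

    @Override
    public void onFailure(Exception e) {
        exception.set(e);
    }
}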
use of org.apache.http.HttpHost in project elasticsearch by elastic.
the class RestClient method nextHost.
/**
 * Returns an {@link Iterable} of hosts to be used for a request call.
 * Ideally, the first host is retrieved from the iterable and used successfully for the request.
 * Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried
 * until there are no more hosts available to retry against. The maximum total number of attempts is equal to the number
 * of hosts in the iterable. The returned iterator will never be empty: if there are no healthy hosts available, nor dead
 * ones ready to be retried, a single dead host gets returned so that it can be retried.
 */
private HostTuple<Iterator<HttpHost>> nextHost() {
    final HostTuple<Set<HttpHost>> hostTuple = this.hostTuple;
    Collection<HttpHost> nextHosts = Collections.emptySet();
    do {
        Set<HttpHost> filteredHosts = new HashSet<>(hostTuple.hosts);
        for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
            if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
                filteredHosts.remove(entry.getKey());
            }
        }
        if (filteredHosts.isEmpty()) {
            //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
            List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
            if (sortedHosts.size() > 0) {
                Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
                    @Override
                    public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
                        return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
                    }
                });
                HttpHost deadHost = sortedHosts.get(0).getKey();
                logger.trace("resurrecting host [" + deadHost + "]");
                nextHosts = Collections.singleton(deadHost);
            }
        } else {
            List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
            Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
            nextHosts = rotatedHosts;
        }
    } while (nextHosts.isEmpty());
    return new HostTuple<>(nextHosts.iterator(), hostTuple.authCache);
}
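The else branch is where the plain round-robin happens: the healthy hosts are copied into a list and rotated by an ever-increasing counter, so each call to nextHost() puts a different host first. A standalone sketch of just that idiom (class and host names are made up for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class RotateHostsDemo {
    public static void main(String[] args) {
        AtomicInteger lastHostIndex = new AtomicInteger(0);
        List<String> hosts = Arrays.asList("host1", "host2", "host3");
        for (int attempt = 0; attempt < 4; attempt++) {
            List<String> rotated = new ArrayList<>(hosts);
            //same idiom as nextHost(): rotate so that a different host comes first on each call
            Collections.rotate(rotated, rotated.size() - lastHostIndex.getAndIncrement());
            System.out.println(rotated);
        }
        //prints [host1, host2, host3], [host2, host3, host1], [host3, host1, host2], [host1, host2, host3]
    }
}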
use of org.apache.http.HttpHost in project elasticsearch by elastic.
the class SyncResponseListenerTests method mockResponse.
private static Response mockResponse() {
    ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
    RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion);
    StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
    HttpResponse httpResponse = new BasicHttpResponse(statusLine);
    return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse);
}
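The mocked response above carries no body, which is enough for listener tests that only inspect the status line. If a test also needed a body, a hedged variant (the method name and JSON payload below are made up) could attach one through the standard HttpCore entity classes:

//illustrative variant, not part of the original test class
private static Response mockJsonResponse() {
    ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
    RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion);
    BasicHttpResponse httpResponse = new BasicHttpResponse(new BasicStatusLine(protocolVersion, 200, "OK"));
    //StringEntity and ContentType come from org.apache.http.entity
    httpResponse.setEntity(new StringEntity("{\"acknowledged\":true}", ContentType.APPLICATION_JSON));
    return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse);
}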
use of org.apache.http.HttpHost in project elasticsearch by elastic.
the class ElasticsearchHostsSniffer method readHost.
private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
    HttpHost httpHost = null;
    String fieldName = null;
    while (parser.nextToken() != JsonToken.END_OBJECT) {
        if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
            fieldName = parser.getCurrentName();
        } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
            if ("http".equals(fieldName)) {
                while (parser.nextToken() != JsonToken.END_OBJECT) {
                    if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
                        URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
                        httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), boundAddressAsURI.getScheme());
                    } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
                        parser.skipChildren();
                    }
                }
            } else {
                parser.skipChildren();
            }
        }
    }
    //http section is not present if http is not enabled on the node, ignore such nodes
    if (httpHost == null) {
        logger.debug("skipping node [" + nodeId + "] with http disabled");
        return null;
    }
    return httpHost;
}
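The only subtle step in readHost is turning the publish_address string into an HttpHost: prefixing it with the sniffer's scheme lets java.net.URI do the host/port splitting. A standalone sketch of that step (the sample address is made up):

import java.net.URI;
import org.apache.http.HttpHost;

public class PublishAddressDemo {
    public static void main(String[] args) {
        //sample value of http.publish_address as it appears in a _nodes response (made up here)
        String publishAddress = "127.0.0.1:9200";
        //prepend the scheme so URI can split host and port, the same trick readHost uses
        URI uri = URI.create("http://" + publishAddress);
        HttpHost host = new HttpHost(uri.getHost(), uri.getPort(), uri.getScheme());
        System.out.println(host); //http://127.0.0.1:9200
    }
}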
use of org.apache.http.HttpHost in project elasticsearch by elastic.
the class ElasticsearchHostsSniffer method readHosts.
private List<HttpHost> readHosts(HttpEntity entity) throws IOException {
    try (InputStream inputStream = entity.getContent()) {
        JsonParser parser = jsonFactory.createParser(inputStream);
        if (parser.nextToken() != JsonToken.START_OBJECT) {
            throw new IOException("expected data to start with an object");
        }
        List<HttpHost> hosts = new ArrayList<>();
        while (parser.nextToken() != JsonToken.END_OBJECT) {
            if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
                if ("nodes".equals(parser.getCurrentName())) {
                    while (parser.nextToken() != JsonToken.END_OBJECT) {
                        JsonToken token = parser.nextToken();
                        assert token == JsonToken.START_OBJECT;
                        String nodeId = parser.getCurrentName();
                        HttpHost sniffedHost = readHost(nodeId, parser, this.scheme);
                        if (sniffedHost != null) {
                            logger.trace("adding node [" + nodeId + "]");
                            hosts.add(sniffedHost);
                        }
                    }
                } else {
                    parser.skipChildren();
                }
            }
        }
        return hosts;
    }
}
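For reference, readHosts expects a payload shaped like the _nodes/http response: a top-level nodes object keyed by node id, each entry containing an http object with a publish_address. A hedged sketch of feeding such a payload in as an HttpEntity (the node ids and addresses are made up):

//illustrative only: StringEntity implements HttpEntity, so a hand-written payload can stand in for a real _nodes/http response
String nodesJson = "{\"nodes\":{"
        + "\"node-1\":{\"http\":{\"publish_address\":\"127.0.0.1:9200\"}},"
        + "\"node-2\":{\"http\":{\"publish_address\":\"127.0.0.1:9201\"}}}}";
HttpEntity entity = new StringEntity(nodesJson, ContentType.APPLICATION_JSON);
//passed through readHosts on a sniffer configured with Scheme.HTTP, this yields
//[http://127.0.0.1:9200, http://127.0.0.1:9201]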