Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class RemoteScrollableHitSource, method execute.
private <T> void execute(String method, String uri, Map<String, String> params, HttpEntity entity,
        BiFunction<XContentParser, XContentType, T> parser, Consumer<? super T> listener) {
    // Preserve the thread context so headers survive after the call
    java.util.function.Supplier<ThreadContext.StoredContext> contextSupplier = threadPool.getThreadContext().newRestorableContext(true);
    class RetryHelper extends AbstractRunnable {
        private final Iterator<TimeValue> retries = backoffPolicy.iterator();

        @Override
        protected void doRun() throws Exception {
            client.performRequestAsync(method, uri, params, entity, new ResponseListener() {
                @Override
                public void onSuccess(org.elasticsearch.client.Response response) {
                    // Restore the thread context to get the precious headers
                    try (ThreadContext.StoredContext ctx = contextSupplier.get()) {
                        // eliminates compiler warning
                        assert ctx != null;
                        T parsedResponse;
                        try {
                            HttpEntity responseEntity = response.getEntity();
                            InputStream content = responseEntity.getContent();
                            XContentType xContentType = null;
                            if (responseEntity.getContentType() != null) {
                                final String mimeType = ContentType.parse(responseEntity.getContentType().getValue()).getMimeType();
                                xContentType = XContentType.fromMediaType(mimeType);
                            }
                            if (xContentType == null) {
                                try {
                                    throw new ElasticsearchException("Response didn't include Content-Type: " + bodyMessage(response.getEntity()));
                                } catch (IOException e) {
                                    ElasticsearchException ee = new ElasticsearchException("Error extracting body from response");
                                    ee.addSuppressed(e);
                                    throw ee;
                                }
                            }
                            // EMPTY is safe here because we don't call namedObject
                            try (XContentParser xContentParser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, content)) {
                                parsedResponse = parser.apply(xContentParser, xContentType);
                            } catch (ParsingException e) {
                                /* Because we're streaming the response we can't get a copy of it here. The best we can do is hint that it
                                 * is totally wrong and we're probably not talking to Elasticsearch. */
                                throw new ElasticsearchException("Error parsing the response, remote is likely not an Elasticsearch instance", e);
                            }
                        } catch (IOException e) {
                            throw new ElasticsearchException("Error deserializing response, remote is likely not an Elasticsearch instance", e);
                        }
                        listener.accept(parsedResponse);
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    try (ThreadContext.StoredContext ctx = contextSupplier.get()) {
                        // eliminates compiler warning
                        assert ctx != null;
                        if (e instanceof ResponseException) {
                            ResponseException re = (ResponseException) e;
                            if (RestStatus.TOO_MANY_REQUESTS.getStatus() == re.getResponse().getStatusLine().getStatusCode()) {
                                if (retries.hasNext()) {
                                    TimeValue delay = retries.next();
                                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
                                    countSearchRetry.run();
                                    threadPool.schedule(delay, ThreadPool.Names.SAME, RetryHelper.this);
                                    return;
                                }
                            }
                            e = wrapExceptionToPreserveStatus(re.getResponse().getStatusLine().getStatusCode(), re.getResponse().getEntity(), re);
                        } else if (e instanceof ContentTooLongException) {
                            e = new IllegalArgumentException("Remote responded with a chunk that was too large. Use a smaller batch size.", e);
                        }
                        fail.accept(e);
                    }
                }
            });
        }

        @Override
        public void onFailure(Exception t) {
            fail.accept(t);
        }
    }
    new RetryHelper().run();
}
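Every snippet on this page shares one idiom: the log message is wrapped in an org.apache.logging.log4j.util.Supplier so the ParameterizedMessage is only built when the level is enabled, and the explicit (Supplier<?>) cast is needed because a bare lambda would otherwise be ambiguous between the Supplier<?> and MessageSupplier overloads of the Logger methods. Below is a minimal standalone sketch of that idiom, assuming only log4j-api 2.x on the classpath; the class and helper names are ours, not from the Elasticsearch source.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingExample {

    private static final Logger logger = LogManager.getLogger(LazyLoggingExample.class);

    public static void main(String[] args) {
        Exception cause = new IllegalStateException("simulated rejection");
        // The ParameterizedMessage is only constructed if TRACE is enabled; the
        // cast selects the trace(Supplier<?>, Throwable) overload, which a bare
        // lambda could not do unambiguously.
        logger.trace((Supplier<?>) () -> new ParameterizedMessage(
                "retrying rejected search after [{}]", expensiveDescription()), cause);
    }

    // Stands in for formatting work that should be skipped when TRACE is off.
    private static String expensiveDescription() {
        return "500ms";
    }
}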
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class AwsEc2UnicastHostsProvider, method fetchDynamicNodes.
protected List<DiscoveryNode> fetchDynamicNodes() {
    List<DiscoveryNode> discoNodes = new ArrayList<>();
    DescribeInstancesResult descInstances;
    try {
        // Query EC2 API based on AZ, instance state, and tag.
        // NOTE: we don't filter by security group during the describe instances request for two reasons:
        // 1. differences in VPCs require different parameters during query (ID vs Name)
        // 2. We want to use two different strategies: (all security groups vs. any security groups)
        descInstances = SocketAccess.doPrivileged(() -> client.describeInstances(buildDescribeInstancesRequest()));
    } catch (AmazonClientException e) {
        logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage());
        logger.debug("Full exception:", e);
        return discoNodes;
    }
    logger.trace("building dynamic unicast discovery nodes...");
    for (Reservation reservation : descInstances.getReservations()) {
        for (Instance instance : reservation.getInstances()) {
            // let's see if we can filter based on groups
            if (!groups.isEmpty()) {
                List<GroupIdentifier> instanceSecurityGroups = instance.getSecurityGroups();
                List<String> securityGroupNames = new ArrayList<>();
                List<String> securityGroupIds = new ArrayList<>();
                for (GroupIdentifier sg : instanceSecurityGroups) {
                    securityGroupNames.add(sg.getGroupName());
                    securityGroupIds.add(sg.getGroupId());
                }
                if (bindAnyGroup) {
                    // We check if we can find at least one group name or one group id in groups.
                    if (disjoint(securityGroupNames, groups) && disjoint(securityGroupIds, groups)) {
                        logger.trace("filtering out instance {} based on groups {}, not part of {}", instance.getInstanceId(), instanceSecurityGroups, groups);
                        // continue to the next instance
                        continue;
                    }
                } else {
                    // We need to match all group names or group ids, otherwise we ignore this instance
                    if (!(securityGroupNames.containsAll(groups) || securityGroupIds.containsAll(groups))) {
                        logger.trace("filtering out instance {} based on groups {}, does not include all of {}", instance.getInstanceId(), instanceSecurityGroups, groups);
                        // continue to the next instance
                        continue;
                    }
                }
            }
            String address = null;
            if (hostType.equals(PRIVATE_DNS)) {
                address = instance.getPrivateDnsName();
            } else if (hostType.equals(PRIVATE_IP)) {
                address = instance.getPrivateIpAddress();
            } else if (hostType.equals(PUBLIC_DNS)) {
                address = instance.getPublicDnsName();
            } else if (hostType.equals(PUBLIC_IP)) {
                address = instance.getPublicIpAddress();
            } else if (hostType.startsWith(TAG_PREFIX)) {
                // Reading the node host from its metadata
                String tagName = hostType.substring(TAG_PREFIX.length());
                logger.debug("reading hostname from [{}] instance tag", tagName);
                List<Tag> tags = instance.getTags();
                for (Tag tag : tags) {
                    if (tag.getKey().equals(tagName)) {
                        address = tag.getValue();
                        logger.debug("using [{}] as the instance address", address);
                    }
                }
            } else {
                throw new IllegalArgumentException(hostType + " is unknown for discovery.ec2.host_type");
            }
            if (address != null) {
                try {
                    // we only limit to 1 port per address, makes no sense to ping 100 ports
                    TransportAddress[] addresses = transportService.addressesFromString(address, 1);
                    for (int i = 0; i < addresses.length; i++) {
                        logger.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]);
                        discoNodes.add(new DiscoveryNode(instance.getInstanceId(), "#cloud-" + instance.getInstanceId() + "-" + i, addresses[i],
                                emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()));
                    }
                } catch (Exception e) {
                    final String finalAddress = address;
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to add {}, address {}", instance.getInstanceId(), finalAddress), e);
                }
            } else {
                logger.trace("not adding {}, address is null, host_type {}", instance.getInstanceId(), hostType);
            }
        }
    }
    logger.debug("using dynamic discovery nodes {}", discoNodes);
    return discoNodes;
}
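The group filter above relies on a static import of java.util.Collections.disjoint: with bindAnyGroup, a single overlapping group name or id keeps the instance, while otherwise the instance must carry every configured group. A self-contained sketch of just that decision follows; the matches helper and the sample group values are hypothetical, introduced only to restate the branch logic.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class GroupFilterExample {

    // Mirrors the filtering decision above: with bindAnyGroup, one overlapping
    // name or id is enough; otherwise the instance must carry every configured group.
    static boolean matches(boolean bindAnyGroup, List<String> configuredGroups,
                           List<String> instanceGroupNames, List<String> instanceGroupIds) {
        if (configuredGroups.isEmpty()) {
            return true; // no group filter configured
        }
        if (bindAnyGroup) {
            return !Collections.disjoint(instanceGroupNames, configuredGroups)
                    || !Collections.disjoint(instanceGroupIds, configuredGroups);
        }
        return instanceGroupNames.containsAll(configuredGroups)
                || instanceGroupIds.containsAll(configuredGroups);
    }

    public static void main(String[] args) {
        List<String> configured = Arrays.asList("es-cluster", "es-data");
        System.out.println(matches(true, configured,
                Arrays.asList("es-cluster"), Arrays.asList("sg-123"))); // true: one name overlaps
        System.out.println(matches(false, configured,
                Arrays.asList("es-cluster"), Arrays.asList("sg-123"))); // false: not all groups present
    }
}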
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class FileBasedUnicastHostsProvider, method buildDynamicNodes.
@Override
public List<DiscoveryNode> buildDynamicNodes() {
    List<String> hostsList;
    try (Stream<String> lines = Files.lines(unicastHostsFilePath)) {
        // lines starting with `#` are comments
        hostsList = lines.filter(line -> line.startsWith("#") == false).collect(Collectors.toList());
    } catch (FileNotFoundException | NoSuchFileException e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Failed to find unicast hosts file [{}]", unicastHostsFilePath), e);
        hostsList = Collections.emptyList();
    } catch (IOException e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Error reading unicast hosts file [{}]", unicastHostsFilePath), e);
        hostsList = Collections.emptyList();
    }
    final List<DiscoveryNode> discoNodes = new ArrayList<>();
    try {
        discoNodes.addAll(resolveHostsLists(executorService, logger, hostsList, 1, transportService, UNICAST_HOST_PREFIX, resolveTimeout));
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    logger.debug("[discovery-file] Using dynamic discovery nodes {}", discoNodes);
    return discoNodes;
}
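The read path is small enough to restate in isolation: stream the file line by line, drop `#` comments, and treat a missing file as an empty host list rather than an error. A minimal JDK-only sketch, where the class name and the sample path are illustrative:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class HostsFileExample {

    // Reads one host per line, skipping `#` comments; a missing or unreadable
    // file yields an empty host list, as in the provider above.
    static List<String> readHosts(Path path) {
        try (Stream<String> lines = Files.lines(path)) {
            return lines.filter(line -> line.startsWith("#") == false)
                        .collect(Collectors.toList());
        } catch (NoSuchFileException e) {
            return Collections.emptyList();
        } catch (IOException e) {
            return Collections.emptyList();
        }
    }

    public static void main(String[] args) {
        System.out.println(readHosts(Paths.get("config/discovery-file/unicast_hosts.txt")));
    }
}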
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class AbstractSimpleTransportTestCase, method testConcurrentSendRespondAndDisconnect.
public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierException, InterruptedException {
    Set<Exception> sendingErrors = ConcurrentCollections.newConcurrentSet();
    Set<Exception> responseErrors = ConcurrentCollections.newConcurrentSet();
    serviceA.registerRequestHandler("test", TestRequest::new, randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC,
            (request, channel) -> {
                try {
                    channel.sendResponse(new TestResponse());
                } catch (Exception e) {
                    logger.info("caught exception while responding", e);
                    responseErrors.add(e);
                }
            });
    final TransportRequestHandler<TestRequest> ignoringRequestHandler = (request, channel) -> {
        try {
            channel.sendResponse(new TestResponse());
        } catch (Exception e) {
            // we don't really care what's going on in B, we're testing through A
            logger.trace("caught exception while responding from node B", e);
        }
    };
    serviceB.registerRequestHandler("test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);
    int halfSenders = scaledRandomIntBetween(3, 10);
    final CyclicBarrier go = new CyclicBarrier(halfSenders * 2 + 1);
    final CountDownLatch done = new CountDownLatch(halfSenders * 2);
    for (int i = 0; i < halfSenders; i++) {
        // B senders just generate activity so serviceA can respond, we don't test what's going on there
        final int sender = i;
        threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                logger.trace("caught exception while sending from B", e);
            }

            @Override
            protected void doRun() throws Exception {
                go.await();
                for (int iter = 0; iter < 10; iter++) {
                    PlainActionFuture<TestResponse> listener = new PlainActionFuture<>();
                    final String info = sender + "_B_" + iter;
                    serviceB.sendRequest(nodeA, "test", new TestRequest(info), new ActionListenerResponseHandler<>(listener, TestResponse::new));
                    try {
                        listener.actionGet();
                    } catch (Exception e) {
                        logger.trace((Supplier<?>) () -> new ParameterizedMessage("caught exception while sending to node {}", nodeA), e);
                    }
                }
            }

            @Override
            public void onAfter() {
                done.countDown();
            }
        });
    }
    for (int i = 0; i < halfSenders; i++) {
        final int sender = i;
        threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                logger.error("unexpected error", e);
                sendingErrors.add(e);
            }

            @Override
            protected void doRun() throws Exception {
                go.await();
                for (int iter = 0; iter < 10; iter++) {
                    PlainActionFuture<TestResponse> listener = new PlainActionFuture<>();
                    final String info = sender + "_" + iter;
                    // capture now
                    final DiscoveryNode node = nodeB;
                    try {
                        serviceA.sendRequest(node, "test", new TestRequest(info), new ActionListenerResponseHandler<>(listener, TestResponse::new));
                        try {
                            listener.actionGet();
                        } catch (ConnectTransportException e) {
                            // ok!
                        } catch (Exception e) {
                            logger.error((Supplier<?>) () -> new ParameterizedMessage("caught exception while sending to node {}", node), e);
                            sendingErrors.add(e);
                        }
                    } catch (NodeNotConnectedException ex) {
                        // ok
                    }
                }
            }

            @Override
            public void onAfter() {
                done.countDown();
            }
        });
    }
    go.await();
    for (int i = 0; i <= 10; i++) {
        if (i % 3 == 0) {
            // simulate restart of nodeB
            serviceB.close();
            MockTransportService newService = buildService("TS_B_" + i, version1, null);
            newService.registerRequestHandler("test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);
            serviceB = newService;
            nodeB = newService.getLocalDiscoNode();
            serviceB.connectToNode(nodeA);
            serviceA.connectToNode(nodeB);
        } else if (serviceA.nodeConnected(nodeB)) {
            serviceA.disconnectFromNode(nodeB);
        } else {
            serviceA.connectToNode(nodeB);
        }
    }
    done.await();
    assertThat("found non connection errors while sending", sendingErrors, empty());
    assertThat("found non connection errors while responding", responseErrors, empty());
}
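The test coordinates its senders with a CyclicBarrier sized one larger than the worker count, so the main thread's own await() releases every worker at once, and a CountDownLatch counted down in onAfter() so the main thread can wait for all workers to finish. A stripped-down sketch of that start-gate/finish-latch shape, with hypothetical names and a plain thread pool in place of the test's infrastructure:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class BarrierLatchExample {

    public static void main(String[] args) throws Exception {
        int workers = 4;
        // One extra party for the coordinating thread, so no worker starts
        // until the coordinator is also ready -- the same shape as `go` above.
        CyclicBarrier go = new CyclicBarrier(workers + 1);
        CountDownLatch done = new CountDownLatch(workers);
        ExecutorService pool = Executors.newFixedThreadPool(workers);
        for (int i = 0; i < workers; i++) {
            final int id = i;
            pool.execute(() -> {
                try {
                    go.await(); // block until the coordinator releases everyone at once
                    System.out.println("worker " + id + " running");
                } catch (Exception e) {
                    Thread.currentThread().interrupt();
                } finally {
                    done.countDown(); // mirrors onAfter()
                }
            });
        }
        go.await();   // release all workers simultaneously
        done.await(); // block until every worker has finished
        pool.shutdown();
        System.out.println("all workers done");
    }
}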
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class NodeJoinControllerTests, method joinNodeAsync.
private SimpleFuture joinNodeAsync(final DiscoveryNode node) throws InterruptedException {
    final SimpleFuture future = new SimpleFuture("join of " + node + " (id [" + joinId.incrementAndGet() + "])");
    logger.debug("starting {}", future);
    // clone the node before submitting to simulate an incoming join, which is guaranteed to have a new
    // disco node object serialized off the network
    nodeJoinController.handleJoinRequest(cloneNode(node), new MembershipAction.JoinCallback() {
        @Override
        public void onSuccess() {
            logger.debug("{} completed", future);
            future.markAsDone();
        }

        @Override
        public void onFailure(Exception e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error for {}", future), e);
            future.markAsFailed(e);
        }
    });
    return future;
}
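Bridging a two-method callback into a future the test can block on, as joinNodeAsync does with its SimpleFuture, is a common test pattern. A minimal JDK-only sketch: JoinCallback here is a stand-in modeled on the shape of MembershipAction.JoinCallback, and adapt is a hypothetical helper, not Elasticsearch API.

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

public class CallbackFutureExample {

    // A stand-in callback interface in the shape of MembershipAction.JoinCallback.
    interface JoinCallback {
        void onSuccess();
        void onFailure(Exception e);
    }

    // Wraps an async operation that reports through a callback into a future,
    // completing it normally on success and exceptionally on failure.
    static CompletableFuture<Void> adapt(Consumer<JoinCallback> asyncOp) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        asyncOp.accept(new JoinCallback() {
            @Override
            public void onSuccess() {
                future.complete(null);
            }

            @Override
            public void onFailure(Exception e) {
                future.completeExceptionally(e);
            }
        });
        return future;
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<Void> f = adapt(cb -> cb.onSuccess()); // completes immediately
        f.get();
        System.out.println("join completed");
    }
}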