Use of org.elasticsearch.client.transport.NoNodeAvailableException in project elasticsearch by elastic.
From the class ElasticsearchExceptionTests, method testFailureToAndFromXContentWithDetails:
public void testFailureToAndFromXContentWithDetails() throws IOException {
final XContent xContent = randomFrom(XContentType.values()).xContent();
Exception failure;
Throwable failureCause;
ElasticsearchException expected;
ElasticsearchException expectedCause;
ElasticsearchException suppressed;
switch (randomIntBetween(0, 6)) {
case 0: // Simple elasticsearch exception without cause
failure = new NoNodeAvailableException("A");
expected = new ElasticsearchException("Elasticsearch exception [type=no_node_available_exception, reason=A]");
expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=no_node_available_exception, reason=A]"));
break;
case 1: // Simple elasticsearch exception with headers (other metadata of type number are not parsed)
failure = new CircuitBreakingException("B", 5_000, 2_000);
((ElasticsearchException) failure).addHeader("header_name", "0", "1");
expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]");
expected.addHeader("header_name", "0", "1");
suppressed = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]");
suppressed.addHeader("header_name", "0", "1");
expected.addSuppressed(suppressed);
break;
case 2: // Elasticsearch exception with a cause, headers and parsable metadata
failureCause = new NullPointerException("var is null");
failure = new ScriptException("C", failureCause, singletonList("stack"), "test", "painless");
((ElasticsearchException) failure).addHeader("script_name", "my_script");
expectedCause = new ElasticsearchException("Elasticsearch exception [type=null_pointer_exception, reason=var is null]");
expected = new ElasticsearchException("Elasticsearch exception [type=script_exception, reason=C]", expectedCause);
expected.addHeader("script_name", "my_script");
expected.addMetadata("es.lang", "painless");
expected.addMetadata("es.script", "test");
expected.addMetadata("es.script_stack", "stack");
suppressed = new ElasticsearchException("Elasticsearch exception [type=script_exception, reason=C]");
suppressed.addHeader("script_name", "my_script");
suppressed.addMetadata("es.lang", "painless");
suppressed.addMetadata("es.script", "test");
suppressed.addMetadata("es.script_stack", "stack");
expected.addSuppressed(suppressed);
break;
case 3: // JDK exception without cause
failure = new IllegalStateException("D");
expected = new ElasticsearchException("Elasticsearch exception [type=illegal_state_exception, reason=D]");
suppressed = new ElasticsearchException("Elasticsearch exception [type=illegal_state_exception, reason=D]");
expected.addSuppressed(suppressed);
break;
case 4: // JDK exception with cause
failureCause = new RoutingMissingException("idx", "type", "id");
failure = new RuntimeException("E", failureCause);
expectedCause = new ElasticsearchException("Elasticsearch exception [type=routing_missing_exception, reason=routing is required for [idx]/[type]/[id]]");
expectedCause.addMetadata("es.index", "idx");
expectedCause.addMetadata("es.index_uuid", "_na_");
expected = new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=E]", expectedCause);
suppressed = new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=E]");
expected.addSuppressed(suppressed);
break;
case 5: // Wrapped exception with cause
failureCause = new FileAlreadyExistsException("File exists");
failure = new BroadcastShardOperationFailedException(new ShardId("_index", "_uuid", 5), "F", failureCause);
expected = new ElasticsearchException("Elasticsearch exception [type=file_already_exists_exception, reason=File exists]");
// strangely, the wrapped exception appears as the root cause...
suppressed = new ElasticsearchException("Elasticsearch exception [type=broadcast_shard_operation_failed_exception, reason=F]");
expected.addSuppressed(suppressed);
break;
case 6: // SearchPhaseExecutionException with cause and multiple failures
DiscoveryNode node = new DiscoveryNode("node_g", buildNewFakeTransportAddress(), Version.CURRENT);
failureCause = new NodeClosedException(node);
failureCause = new NoShardAvailableActionException(new ShardId("_index_g", "_uuid_g", 6), "node_g", failureCause);
ShardSearchFailure[] shardFailures = new ShardSearchFailure[] {
    new ShardSearchFailure(new ParsingException(0, 0, "Parsing g", null),
        new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 61))),
    new ShardSearchFailure(new RepositoryException("repository_g", "Repo"),
        new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62))),
    new ShardSearchFailure(new SearchContextMissingException(0L), null)
};
failure = new SearchPhaseExecutionException("phase_g", "G", failureCause, shardFailures);
expectedCause = new ElasticsearchException("Elasticsearch exception [type=node_closed_exception, reason=node closed " + node + "]");
expectedCause = new ElasticsearchException("Elasticsearch exception [type=no_shard_available_action_exception, reason=node_g]", expectedCause);
expectedCause.addMetadata("es.index", "_index_g");
expectedCause.addMetadata("es.index_uuid", "_uuid_g");
expectedCause.addMetadata("es.shard", "6");
expected = new ElasticsearchException("Elasticsearch exception [type=search_phase_execution_exception, reason=G]", expectedCause);
expected.addMetadata("es.phase", "phase_g");
expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=Parsing g]"));
expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=repository_exception, reason=[repository_g] Repo]"));
expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=search_context_missing_exception, reason=No search context found for id [0]]"));
break;
default:
throw new UnsupportedOperationException("Failed to generate randomized failure");
}
Exception finalFailure = failure;
BytesReference failureBytes = XContentHelper.toXContent((builder, params) -> {
ElasticsearchException.generateFailureXContent(builder, params, finalFailure, true);
return builder;
}, xContent.type(), randomBoolean());
try (XContentParser parser = createParser(xContent, failureBytes)) {
failureBytes = shuffleXContent(parser, randomBoolean()).bytes();
}
ElasticsearchException parsedFailure;
try (XContentParser parser = createParser(xContent, failureBytes)) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
parsedFailure = ElasticsearchException.failureFromXContent(parser);
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertNull(parser.nextToken());
}
assertDeepEquals(expected, parsedFailure);
}
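The test round-trips a randomized failure through XContent: generateFailureXContent writes the failure under an "error" field, failureFromXContent parses it back, and assertDeepEquals compares the result (headers, metadata, cause chain, and suppressed exceptions) against the hand-built expectation. A minimal sketch of the same round-trip without the randomization, reusing only APIs exercised above (the asserted message assumes the standard "Elasticsearch exception [type=..., reason=...]" rendering shown in this test):
public void testSimpleFailureRoundTrip() throws IOException {
    final XContent xContent = XContentType.JSON.xContent();
    final Exception failure = new IllegalStateException("boom");
    // serialize the failure exactly as the test above does
    BytesReference bytes = XContentHelper.toXContent((builder, params) -> {
        ElasticsearchException.generateFailureXContent(builder, params, failure, true);
        return builder;
    }, xContent.type(), false);
    try (XContentParser parser = createParser(xContent, bytes)) {
        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
        assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
        ElasticsearchException parsed = ElasticsearchException.failureFromXContent(parser);
        assertEquals("Elasticsearch exception [type=illegal_state_exception, reason=boom]", parsed.getMessage());
    }
}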
Use of org.elasticsearch.client.transport.NoNodeAvailableException in project elasticsearch-jdbc by jprante.
From the class ClientTests, method testClient:
@Test
public void testClient() throws IOException {
// disable DNS caching for failover
Security.setProperty("networkaddress.cache.ttl", "0");
Settings settings = Settings.settingsBuilder()
    .putArray("elasticsearch.host", new String[] { "localhost:9300", "localhost:9301" })
    .put("transport.type", "org.elasticsearch.transport.netty.FoundNettyTransport")
    .put("transport.found.ssl-ports", 9443)
    .build();
Integer maxbulkactions = settings.getAsInt("max_bulk_actions", 10000);
Integer maxconcurrentbulkrequests = settings.getAsInt("max_concurrent_bulk_requests", Runtime.getRuntime().availableProcessors() * 2);
ByteSizeValue maxvolume = settings.getAsBytesSize("max_bulk_volume", ByteSizeValue.parseBytesSizeValue("10m", ""));
TimeValue flushinterval = settings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
Settings.Builder clientSettings = Settings.settingsBuilder()
    .put("cluster.name", settings.get("elasticsearch.cluster", "elasticsearch"))
    .putArray("host", settings.getAsArray("elasticsearch.host"))
    .put("port", settings.getAsInt("elasticsearch.port", 9300))
    .put("sniff", settings.getAsBoolean("elasticsearch.sniff", false))
    .put("autodiscover", settings.getAsBoolean("elasticsearch.autodiscover", false))
    // prevents lookup of names.txt, we don't have it, and marks this node as "feeder";
    // see also module load skipping in JDBCRiverPlugin
    .put("name", "feeder")
    // do not ignore cluster name setting
    .put("client.transport.ignore_cluster_name", false)
    // ping timeout
    .put("client.transport.ping_timeout", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(10)))
    // for sniff sampling
    .put("client.transport.nodes_sampler_interval", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5)))
    // pointing to a non-existing folder disables loading site plugins
    .put("path.plugins", ".dontexist")
    .put("path.home", System.getProperty("path.home"));
if (settings.get("transport.type") != null) {
clientSettings.put("transport.type", settings.get("transport.type"));
}
// copy found.no transport settings
Settings foundTransportSettings = settings.getAsSettings("transport.found");
if (foundTransportSettings != null) {
Map<String, String> foundTransportSettingsMap = foundTransportSettings.getAsMap();
for (Map.Entry<String, String> entry : foundTransportSettingsMap.entrySet()) {
clientSettings.put("transport.found." + entry.getKey(), entry.getValue());
}
}
try {
ClientAPI clientAPI = ClientBuilder.builder()
    .put(settings)
    .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxbulkactions)
    .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, maxconcurrentbulkrequests)
    .put(ClientBuilder.MAX_VOLUME_PER_REQUEST, maxvolume)
    .put(ClientBuilder.FLUSH_INTERVAL, flushinterval)
    .setMetric(new ElasticsearchIngestMetric())
    .toBulkTransportClient();
} catch (NoNodeAvailableException e) {
// ok
}
}
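The test deliberately points the client at localhost nodes that are not expected to be running, so NoNodeAvailableException is the normal outcome and is swallowed; what is actually under test is that the builder wires the settings together without failing earlier. Outside a test, the usual handling is a bounded retry. A sketch of that pattern (the index(...) call below is a hypothetical placeholder, not an API shown above):
private void indexWithRetry(ClientAPI clientAPI) throws InterruptedException {
    int attempts = 0;
    while (true) {
        try {
            clientAPI.index("myindex", "mytype", "1", "{\"field\":1}"); // hypothetical call
            return;
        } catch (NoNodeAvailableException e) {
            if (++attempts >= 3) {
                throw e; // no node came back after several tries; give up
            }
            Thread.sleep(1000L); // back off before retrying
        }
    }
}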
Use of org.elasticsearch.client.transport.NoNodeAvailableException in project elasticsearch by elastic.
From the class UpdateIT, method testStressUpdateDeleteConcurrency:
public void testStressUpdateDeleteConcurrency() throws Exception {
//We create an index with merging disabled so that deletes don't get merged away
assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)));
ensureGreen();
final int numberOfThreads = scaledRandomIntBetween(3, 5);
final int numberOfIdsPerThread = scaledRandomIntBetween(3, 10);
final int numberOfUpdatesPerId = scaledRandomIntBetween(10, 100);
final int retryOnConflict = randomIntBetween(0, 1);
final CountDownLatch latch = new CountDownLatch(numberOfThreads);
final CountDownLatch startLatch = new CountDownLatch(1);
final List<Throwable> failures = new CopyOnWriteArrayList<>();
final class UpdateThread extends Thread {
final Map<Integer, Integer> failedMap = new HashMap<>();
final int numberOfIds;
final int updatesPerId;
final int maxUpdateRequests = numberOfIdsPerThread * numberOfUpdatesPerId;
final int maxDeleteRequests = numberOfIdsPerThread * numberOfUpdatesPerId;
private final Semaphore updateRequestsOutstanding = new Semaphore(maxUpdateRequests);
private final Semaphore deleteRequestsOutstanding = new Semaphore(maxDeleteRequests);
UpdateThread(int numberOfIds, int updatesPerId) {
this.numberOfIds = numberOfIds;
this.updatesPerId = updatesPerId;
}
final class UpdateListener implements ActionListener<UpdateResponse> {
int id;
UpdateListener(int id) {
this.id = id;
}
@Override
public void onResponse(UpdateResponse updateResponse) {
updateRequestsOutstanding.release(1);
}
@Override
public void onFailure(Exception e) {
synchronized (failedMap) {
incrementMapValue(id, failedMap);
}
updateRequestsOutstanding.release(1);
}
}
final class DeleteListener implements ActionListener<DeleteResponse> {
int id;
DeleteListener(int id) {
this.id = id;
}
@Override
public void onResponse(DeleteResponse deleteResponse) {
deleteRequestsOutstanding.release(1);
}
@Override
public void onFailure(Exception e) {
synchronized (failedMap) {
incrementMapValue(id, failedMap);
}
deleteRequestsOutstanding.release(1);
}
}
@Override
public void run() {
try {
startLatch.await();
boolean hasWaitedForNoNode = false;
for (int j = 0; j < numberOfIds; j++) {
for (int k = 0; k < numberOfUpdatesPerId; ++k) {
updateRequestsOutstanding.acquire();
try {
UpdateRequest ur = client().prepareUpdate("test", "type1", Integer.toString(j))
    .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap()))
    .setRetryOnConflict(retryOnConflict)
    .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
    .request();
client().update(ur, new UpdateListener(j));
} catch (NoNodeAvailableException nne) {
updateRequestsOutstanding.release();
synchronized (failedMap) {
incrementMapValue(j, failedMap);
}
if (hasWaitedForNoNode) {
throw nne;
}
logger.warn("Got NoNodeException waiting for 1 second for things to recover.");
hasWaitedForNoNode = true;
Thread.sleep(1000);
}
try {
deleteRequestsOutstanding.acquire();
DeleteRequest dr = client().prepareDelete("test", "type1", Integer.toString(j)).request();
client().delete(dr, new DeleteListener(j));
} catch (NoNodeAvailableException nne) {
deleteRequestsOutstanding.release();
synchronized (failedMap) {
incrementMapValue(j, failedMap);
}
if (hasWaitedForNoNode) {
throw nne;
}
logger.warn("Got NoNodeException waiting for 1 second for things to recover.");
hasWaitedForNoNode = true;
//Wait for no-node to clear
Thread.sleep(1000);
}
}
}
} catch (Exception e) {
logger.error("Something went wrong", e);
failures.add(e);
} finally {
try {
waitForOutstandingRequests(TimeValue.timeValueSeconds(60), updateRequestsOutstanding, maxUpdateRequests, "Update");
waitForOutstandingRequests(TimeValue.timeValueSeconds(60), deleteRequestsOutstanding, maxDeleteRequests, "Delete");
} catch (ElasticsearchTimeoutException ete) {
failures.add(ete);
}
latch.countDown();
}
}
private void incrementMapValue(int j, Map<Integer, Integer> map) {
if (!map.containsKey(j)) {
map.put(j, 0);
}
map.put(j, map.get(j) + 1);
}
private void waitForOutstandingRequests(TimeValue timeOut, Semaphore requestsOutstanding, int maxRequests, String name) {
long start = System.currentTimeMillis();
do {
long msRemaining = timeOut.getMillis() - (System.currentTimeMillis() - start);
logger.info("[{}] going to try and acquire [{}] in [{}]ms [{}] available to acquire right now", name, maxRequests, msRemaining, requestsOutstanding.availablePermits());
try {
requestsOutstanding.tryAcquire(maxRequests, msRemaining, TimeUnit.MILLISECONDS);
return;
} catch (InterruptedException ie) {
//Just keep swimming
}
} while ((System.currentTimeMillis() - start) < timeOut.getMillis());
throw new ElasticsearchTimeoutException("Requests were still outstanding after the timeout [" + timeOut + "] for type [" + name + "]");
}
}
final List<UpdateThread> threads = new ArrayList<>();
for (int i = 0; i < numberOfThreads; i++) {
UpdateThread ut = new UpdateThread(numberOfIdsPerThread, numberOfUpdatesPerId);
ut.start();
threads.add(ut);
}
startLatch.countDown();
latch.await();
for (UpdateThread ut : threads) {
//Threads should have finished because of the latch.await
ut.join();
}
//If there are no errors, every request received a response; otherwise the test would have timed out
//acquiring the outstanding-request semaphores.
for (Throwable throwable : failures) {
logger.info("Captured failure on concurrent update:", throwable);
}
assertThat(failures.size(), equalTo(0));
//All the previous operations should be complete or failed at this point
for (int i = 0; i < numberOfIdsPerThread; ++i) {
UpdateResponse ur = client().prepareUpdate("test", "type1", Integer.toString(i))
    .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap()))
    .setRetryOnConflict(Integer.MAX_VALUE)
    .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
    .execute().actionGet();
}
refresh();
for (int i = 0; i < numberOfIdsPerThread; ++i) {
int totalFailures = 0;
GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
if (response.isExists()) {
assertThat(response.getId(), equalTo(Integer.toString(i)));
int expectedVersion = (numberOfThreads * numberOfUpdatesPerId * 2) + 1;
for (UpdateThread ut : threads) {
if (ut.failedMap.containsKey(i)) {
totalFailures += ut.failedMap.get(i);
}
}
expectedVersion -= totalFailures;
logger.error("Actual version [{}] Expected version [{}] Total failures [{}]", response.getVersion(), expectedVersion, totalFailures);
assertThat(response.getVersion(), equalTo((long) expectedVersion));
assertThat(response.getVersion() + totalFailures, equalTo((long) ((numberOfUpdatesPerId * numberOfThreads * 2) + 1)));
}
}
}
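Both request loops apply the same policy to NoNodeAvailableException: release the permit, record the failure, tolerate the first occurrence with a one-second pause, and rethrow on the second. Factored out, the policy looks roughly like this (an illustrative sketch; these helper names do not appear in the test):
private boolean hasWaitedForNoNode = false;

private void onNoNodeAvailable(NoNodeAvailableException nne) throws Exception {
    if (hasWaitedForNoNode) {
        throw nne; // second failure in this run: give up and fail the thread
    }
    hasWaitedForNoNode = true;
    logger.warn("Got NoNodeException; waiting for 1 second for things to recover.");
    Thread.sleep(1000); // give the cluster a chance to come back
}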
Use of org.elasticsearch.client.transport.NoNodeAvailableException in project elasticsearch-jdbc by jprante.
From the class ClientTests, method testFoundSettings:
@Test
public void testFoundSettings() throws IOException {
// disable DNS caching for failover
Security.setProperty("networkaddress.cache.ttl", "0");
Settings settings = Settings.settingsBuilder()
    .putArray("elasticsearch.host", new String[] { "localhost:9300", "localhost:9301" })
    .put("transport.type", "org.elasticsearch.transport.netty.FoundNettyTransport")
    .put("transport.found.ssl-ports", 9443)
    .build();
Integer maxbulkactions = settings.getAsInt("max_bulk_actions", 10000);
Integer maxconcurrentbulkrequests = settings.getAsInt("max_concurrent_bulk_requests", Runtime.getRuntime().availableProcessors() * 2);
ByteSizeValue maxvolume = settings.getAsBytesSize("max_bulk_volume", ByteSizeValue.parseBytesSizeValue("10m", ""));
TimeValue flushinterval = settings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
Settings.Builder clientSettings = Settings.settingsBuilder()
    .put("cluster.name", settings.get("elasticsearch.cluster", "elasticsearch"))
    .putArray("host", settings.getAsArray("elasticsearch.host"))
    .put("port", settings.getAsInt("elasticsearch.port", 9300))
    .put("sniff", settings.getAsBoolean("elasticsearch.sniff", false))
    .put("autodiscover", settings.getAsBoolean("elasticsearch.autodiscover", false))
    // prevents lookup of names.txt, we don't have it, and marks this node as "feeder";
    // see also module load skipping in JDBCRiverPlugin
    .put("name", "feeder")
    // do not ignore cluster name setting
    .put("client.transport.ignore_cluster_name", false)
    // ping timeout
    .put("client.transport.ping_timeout", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5)))
    // for sniff sampling
    .put("client.transport.nodes_sampler_interval", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5)))
    // pointing to a non-existing folder disables loading site plugins
    .put("path.plugins", ".dontexist")
    .put("path.home", System.getProperty("path.home"));
try {
ClientAPI clientAPI = ClientBuilder.builder()
    .put(settings)
    .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxbulkactions)
    .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, maxconcurrentbulkrequests)
    .put(ClientBuilder.MAX_VOLUME_PER_REQUEST, maxvolume)
    .put(ClientBuilder.FLUSH_INTERVAL, flushinterval)
    .setMetric(new ElasticsearchIngestMetric())
    .toBulkTransportClient();
} catch (NoNodeAvailableException e) {
// ok
}
}
Use of org.elasticsearch.client.transport.NoNodeAvailableException in project sonarqube by SonarSource.
From the class SearchServerTest, method start_stop_server:
@Test
public void start_stop_server() throws Exception {
Props props = new Props(new Properties());
// the following properties always have default values (see ProcessProperties)
InetAddress host = InetAddress.getLoopbackAddress();
props.set(ProcessProperties.SEARCH_HOST, host.getHostAddress());
props.set(ProcessProperties.SEARCH_PORT, String.valueOf(port));
props.set(ProcessProperties.CLUSTER_NAME, A_CLUSTER_NAME);
props.set(EsSettings.CLUSTER_SEARCH_NODE_NAME, A_NODE_NAME);
props.set(ProcessProperties.PATH_HOME, temp.newFolder().getAbsolutePath());
props.set(ProcessEntryPoint.PROPERTY_SHARED_PATH, temp.newFolder().getAbsolutePath());
underTest = new SearchServer(props);
underTest.start();
assertThat(underTest.getStatus()).isEqualTo(Monitored.Status.OPERATIONAL);
Settings settings = Settings.builder().put("cluster.name", A_CLUSTER_NAME).build();
client = TransportClient.builder().settings(settings).build().addTransportAddress(new InetSocketTransportAddress(host, port));
assertThat(client.admin().cluster().prepareClusterStats().get().getStatus()).isEqualTo(ClusterHealthStatus.GREEN);
underTest.stop();
underTest.awaitStop();
underTest = null;
try {
client.admin().cluster().prepareClusterStats().get();
fail();
} catch (NoNodeAvailableException exception) {
// ok
}
}
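The closing try/fail/catch idiom can be written more compactly with assertThrows, assuming the project is on JUnit 4.13+ or JUnit 5 (an assumption; the original sticks to the classic idiom):
underTest.stop();
underTest.awaitStop();
underTest = null;
// a stopped server leaves the transport client with no reachable node
assertThrows(NoNodeAvailableException.class,
    () -> client.admin().cluster().prepareClusterStats().get());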