use of org.elasticsearch.client.Client in project elasticsearch by elastic.
the class TransportRethrottleActionTests method rethrottleTestCase.
/**
* Test rethrottling.
* @param runningSlices the number of slices still running
* @param simulator simulate a response from the sub-request to rethrottle the child requests
* @param verifier verify the resulting response
*/
private void rethrottleTestCase(int runningSlices, Consumer<ActionListener<ListTasksResponse>> simulator,
        Consumer<ActionListener<TaskInfo>> verifier) {
    Client client = mock(Client.class);
    String localNodeId = randomAsciiOfLength(5);
    float newRequestsPerSecond = randomValueOtherThanMany(f -> f <= 0, () -> randomFloat());
    @SuppressWarnings("unchecked")
    ActionListener<TaskInfo> listener = mock(ActionListener.class);
    TransportRethrottleAction.rethrottle(localNodeId, client, task, newRequestsPerSecond, listener);
    // Capture the sub request and the listener so we can verify they are sane
    ArgumentCaptor<RethrottleRequest> subRequest = ArgumentCaptor.forClass(RethrottleRequest.class);
    // Magical generics incantation.....
    @SuppressWarnings({ "unchecked", "rawtypes" })
    ArgumentCaptor<ActionListener<ListTasksResponse>> subListener = ArgumentCaptor.forClass((Class) ActionListener.class);
    if (runningSlices > 0) {
        verify(client).execute(eq(RethrottleAction.INSTANCE), subRequest.capture(), subListener.capture());
        assertEquals(new TaskId(localNodeId, task.getId()), subRequest.getValue().getParentTaskId());
        assertEquals(newRequestsPerSecond / runningSlices, subRequest.getValue().getRequestsPerSecond(), 0.00001f);
        simulator.accept(subListener.getValue());
    }
    verifier.accept(listener);
}
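The raw-type cast behind the "magical generics incantation" comment is the standard workaround for Mockito's ArgumentCaptor.forClass not accepting parameterized types. Below is a minimal, self-contained sketch of the same pattern; the Notifier interface is a hypothetical stand-in, not part of the snippet above:

import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;
import org.mockito.ArgumentCaptor;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class CaptorSketch {
    // Hypothetical collaborator that hands results to a generic callback.
    interface Notifier {
        void notify(Consumer<List<String>> callback);
    }

    public static void main(String[] args) {
        Notifier notifier = mock(Notifier.class);
        notifier.notify(strings -> {});
        // forClass cannot express Consumer<List<String>>, so capture the raw type
        // and suppress the resulting warnings, exactly as the test above does.
        @SuppressWarnings({ "unchecked", "rawtypes" })
        ArgumentCaptor<Consumer<List<String>>> captor = ArgumentCaptor.forClass((Class) Consumer.class);
        verify(notifier).notify(captor.capture());
        // The captured callback can now be invoked to simulate a response,
        // which is what the simulator Consumer does in the test above.
        captor.getValue().accept(Arrays.asList("simulated result"));
    }
}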
use of org.elasticsearch.client.Client in project elasticsearch by elastic.
the class DiscoveryWithServiceDisruptionsIT method testAckedIndexing.
/**
 * Test that we do not lose documents whose indexing request was acknowledged as successful,
 * under a randomly selected disruption scheme. We also collect & report the types of indexing
 * failures that occur.
 * <p>
 * This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates.
 */
@TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE,"
        + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE,"
        + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE")
public void testAckedIndexing() throws Exception {
    final int seconds = TEST_NIGHTLY && rarely() ? 5 : 1;
    final String timeout = seconds + "s";
    final List<String> nodes = startCluster(rarely() ? 5 : 3);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2))
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
            .put(IndexSettings.INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL.getKey(), randomBoolean() ? "5s" : "200ms")));
    ensureGreen();
    ServiceDisruptionScheme disruptionScheme = addRandomDisruptionScheme();
    logger.info("disruption scheme [{}] added", disruptionScheme);
    // doc id -> the node the indexing request was sent through
    final ConcurrentHashMap<String, String> ackedDocs = new ConcurrentHashMap<>();
    final AtomicBoolean stop = new AtomicBoolean(false);
    List<Thread> indexers = new ArrayList<>(nodes.size());
    List<Semaphore> semaphores = new ArrayList<>(nodes.size());
    final AtomicInteger idGenerator = new AtomicInteger(0);
    final AtomicReference<CountDownLatch> countDownLatchRef = new AtomicReference<>();
    final List<Exception> exceptedExceptions = Collections.synchronizedList(new ArrayList<Exception>());
    logger.info("starting indexers");
    try {
        for (final String node : nodes) {
            final Semaphore semaphore = new Semaphore(0);
            semaphores.add(semaphore);
            final Client client = client(node);
            final String name = "indexer_" + indexers.size();
            final int numPrimaries = getNumShards("test").numPrimaries;
            Thread thread = new Thread(() -> {
                while (!stop.get()) {
                    String id = null;
                    try {
                        if (!semaphore.tryAcquire(10, TimeUnit.SECONDS)) {
                            continue;
                        }
                        logger.info("[{}] Acquired semaphore and it has {} permits left", name, semaphore.availablePermits());
                        try {
                            id = Integer.toString(idGenerator.incrementAndGet());
                            int shard = Math.floorMod(Murmur3HashFunction.hash(id), numPrimaries);
                            logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard);
                            IndexResponse response = client.prepareIndex("test", "type", id)
                                    .setSource("{}", XContentType.JSON)
                                    .setTimeout(timeout)
                                    .get(timeout);
                            assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
                            ackedDocs.put(id, node);
                            logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node);
                        } catch (ElasticsearchException e) {
                            exceptedExceptions.add(e);
                            final String docId = id;
                            logger.trace((Supplier<?>) () ->
                                new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e);
                        } finally {
                            countDownLatchRef.get().countDown();
                            logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount());
                        }
                    } catch (InterruptedException e) {
                        // fine - semaphore interrupt
                    } catch (AssertionError | Exception e) {
                        logger.info((Supplier<?>) () ->
                            new ParameterizedMessage("unexpected exception in background thread of [{}]", node), e);
                    }
                }
            });
            thread.setName(name);
            thread.start();
            indexers.add(thread);
        }
        int docsPerIndexer = randomInt(3);
        logger.info("indexing {} docs per indexer before partition", docsPerIndexer);
        countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
        for (Semaphore semaphore : semaphores) {
            semaphore.release(docsPerIndexer);
        }
        assertTrue(countDownLatchRef.get().await(1, TimeUnit.MINUTES));
        for (int iter = 1 + randomInt(2); iter > 0; iter--) {
            logger.info("starting disruptions & indexing (iteration [{}])", iter);
            disruptionScheme.startDisrupting();
            docsPerIndexer = 1 + randomInt(5);
            logger.info("indexing {} docs per indexer during partition", docsPerIndexer);
            countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
            Collections.shuffle(semaphores, random());
            for (Semaphore semaphore : semaphores) {
                assertThat(semaphore.availablePermits(), equalTo(0));
                semaphore.release(docsPerIndexer);
            }
            logger.info("waiting for indexing requests to complete");
            assertTrue(countDownLatchRef.get().await(docsPerIndexer * seconds * 1000 + 2000, TimeUnit.MILLISECONDS));
            logger.info("stopping disruption");
            disruptionScheme.stopDisrupting();
            for (String node : internalCluster().getNodeNames()) {
                ensureStableCluster(nodes.size(), TimeValue.timeValueMillis(
                    disruptionScheme.expectedTimeToHeal().millis() + DISRUPTION_HEALING_OVERHEAD.millis()), true, node);
            }
            ensureGreen("test");
            logger.info("validating successful docs");
            assertBusy(() -> {
                for (String node : nodes) {
                    try {
                        logger.debug("validating through node [{}] ([{}] acked docs)", node, ackedDocs.size());
                        for (String id : ackedDocs.keySet()) {
                            assertTrue("doc [" + id + "] indexed via node [" + ackedDocs.get(id) + "] not found",
                                client(node).prepareGet("test", "type", id).setPreference("_local").get().isExists());
                        }
                    } catch (AssertionError | NoShardAvailableActionException e) {
                        throw new AssertionError(e.getMessage() + " (checked via node [" + node + "])", e);
                    }
                }
            }, 30, TimeUnit.SECONDS);
            logger.info("done validating (iteration [{}])", iter);
        }
    } finally {
        if (exceptedExceptions.size() > 0) {
            StringBuilder sb = new StringBuilder();
            for (Exception e : exceptedExceptions) {
                sb.append("\n").append(e.getMessage());
            }
            logger.debug("Indexing exceptions during disruption: {}", sb);
        }
        logger.info("shutting down indexers");
        stop.set(true);
        for (Thread indexer : indexers) {
            indexer.interrupt();
            indexer.join(60000);
        }
    }
}
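The shard each indexer thread logs is computed with Math.floorMod(Murmur3HashFunction.hash(id), numPrimaries). Below is a rough, self-contained sketch of that routing arithmetic; String.hashCode stands in for Elasticsearch's internal Murmur3HashFunction so the example runs on its own:

public class ShardRoutingSketch {
    // floorMod keeps the result in [0, numPrimaries) even for negative hashes,
    // which the % operator would not guarantee.
    static int shardFor(String id, int numPrimaries) {
        int hash = id.hashCode(); // stand-in for Murmur3HashFunction.hash(id)
        return Math.floorMod(hash, numPrimaries);
    }

    public static void main(String[] args) {
        for (String id : new String[] { "1", "2", "3", "4" }) {
            System.out.println("id " + id + " -> shard " + shardFor(id, 3));
        }
    }
}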
use of org.elasticsearch.client.Client in project elasticsearch by elastic.
the class GatewayIndexStateIT method testIndexDeletionWhenNodeRejoins.
/**
* This test ensures that when an index deletion takes place while a node is offline, when that
* node rejoins the cluster, it deletes the index locally instead of importing it as a dangling index.
*/
public void testIndexDeletionWhenNodeRejoins() throws Exception {
    final String indexName = "test-index-del-on-node-rejoin-idx";
    final int numNodes = 2;
    final List<String> nodes;
    if (randomBoolean()) {
        // test with a regular index
        logger.info("--> starting a cluster with " + numNodes + " nodes");
        nodes = internalCluster().startNodes(numNodes);
        logger.info("--> create an index");
        createIndex(indexName);
    } else {
        // test with a shadow replica index
        final Path dataPath = createTempDir();
        logger.info("--> created temp data path for shadow replicas [{}]", dataPath);
        logger.info("--> starting a cluster with " + numNodes + " nodes");
        final Settings nodeSettings = Settings.builder()
                .put("node.add_lock_id_to_custom_path", false)
                .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath.toString())
                .put("index.store.fs.fs_lock", randomFrom("native", "simple"))
                .build();
        nodes = internalCluster().startNodes(numNodes, nodeSettings);
        logger.info("--> create a shadow replica index");
        createShadowReplicaIndex(indexName, dataPath, numNodes - 1);
    }
    logger.info("--> waiting for green status");
    ensureGreen();
    final String indexUUID = resolveIndex(indexName).getUUID();
    logger.info("--> restart a random data node, deleting the index in between stopping and restarting");
    internalCluster().restartRandomDataNode(new RestartCallback() {
        @Override
        public Settings onNodeStopped(final String nodeName) throws Exception {
            nodes.remove(nodeName);
            logger.info("--> stopped node[{}], remaining nodes {}", nodeName, nodes);
            assert nodes.size() > 0;
            final String otherNode = nodes.get(0);
            logger.info("--> delete index and verify it is deleted");
            final Client client = client(otherNode);
            client.admin().indices().prepareDelete(indexName).execute().actionGet();
            assertFalse(client.admin().indices().prepareExists(indexName).execute().actionGet().isExists());
            return super.onNodeStopped(nodeName);
        }
    });
    logger.info("--> wait until all nodes are back online");
    client().admin().cluster().health(Requests.clusterHealthRequest()
            .waitForEvents(Priority.LANGUID)
            .waitForNodes(Integer.toString(numNodes))).actionGet();
    logger.info("--> waiting for green status");
    ensureGreen();
    logger.info("--> verify that the deleted index is removed from the cluster and not reimported as dangling by the restarted node");
    assertFalse(client().admin().indices().prepareExists(indexName).execute().actionGet().isExists());
    assertBusy(() -> {
        final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class);
        try {
            assertFalse("index folder " + indexUUID + " should be deleted",
                nodeEnv.availableIndexFolders().contains(indexUUID));
        } catch (IOException e) {
            logger.error("Unable to retrieve available index folders from the node", e);
            fail("Unable to retrieve available index folders from the node");
        }
    });
}
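The final check relies on assertBusy, an ESTestCase helper that retries an assertion until it passes or a timeout elapses, since the rejoined node may still be processing the deletion. Below is a rough sketch of the idiom, assuming a fixed retry interval; it is not the actual implementation, which backs off with growing intervals:

import java.util.concurrent.TimeUnit;

public class BusyAssertSketch {
    static void assertBusy(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        AssertionError last = null;
        do {
            try {
                assertion.run();
                return; // assertion passed
            } catch (AssertionError e) {
                last = e; // remember the most recent failure
            }
            Thread.sleep(100); // back off briefly before retrying
        } while (System.nanoTime() < deadline);
        throw last; // timed out: rethrow the last failure
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        assertBusy(() -> {
            if (System.currentTimeMillis() - start < 500) {
                throw new AssertionError("not ready yet");
            }
        }, 5, TimeUnit.SECONDS);
        System.out.println("assertion eventually passed");
    }
}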
use of org.elasticsearch.client.Client in project elasticsearch by elastic.
the class TransportReindexAction method buildRestClient.
/**
* Build the {@link RestClient} used for reindexing from remote clusters.
* @param remoteInfo connection information for the remote cluster
* @param taskId the id of the current task. This is added to the thread name for easier tracking
* @param threadCollector a list in which we collect all the threads created by the client
*/
static RestClient buildRestClient(RemoteInfo remoteInfo, long taskId, List<Thread> threadCollector) {
    Header[] clientHeaders = new Header[remoteInfo.getHeaders().size()];
    int i = 0;
    for (Map.Entry<String, String> header : remoteInfo.getHeaders().entrySet()) {
        clientHeaders[i++] = new BasicHeader(header.getKey(), header.getValue());
    }
    return RestClient.builder(new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme()))
            .setDefaultHeaders(clientHeaders)
            .setRequestConfigCallback(c -> {
                c.setConnectTimeout(Math.toIntExact(remoteInfo.getConnectTimeout().millis()));
                c.setSocketTimeout(Math.toIntExact(remoteInfo.getSocketTimeout().millis()));
                return c;
            })
            .setHttpClientConfigCallback(c -> {
                if (remoteInfo.getUsername() != null) {
                    UsernamePasswordCredentials creds = new UsernamePasswordCredentials(remoteInfo.getUsername(), remoteInfo.getPassword());
                    CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
                    credentialsProvider.setCredentials(AuthScope.ANY, creds);
                    c.setDefaultCredentialsProvider(credentialsProvider);
                }
                AtomicInteger threads = new AtomicInteger();
                c.setThreadFactory(r -> {
                    String name = "es-client-" + taskId + "-" + threads.getAndIncrement();
                    Thread t = new Thread(r, name);
                    threadCollector.add(t);
                    return t;
                });
                c.setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build());
                return c;
            })
            .build();
}
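For context, here is a caller-side sketch using the same low-level RestClient builder API with basic authentication; the host and credentials ("localhost", "user", "password") are hypothetical placeholders, not values from the method above:

import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.RestClient;

public class RemoteClientSketch {
    public static void main(String[] args) throws Exception {
        BasicCredentialsProvider credentials = new BasicCredentialsProvider();
        credentials.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "password"));
        RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http"))
                .setHttpClientConfigCallback(c -> c.setDefaultCredentialsProvider(credentials))
                .build();
        try {
            // issue requests with client.performRequest(...) here
        } finally {
            client.close(); // the client holds I/O threads, so always close it
        }
    }
}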
use of org.elasticsearch.client.Client in project elasticsearch by elastic.
the class SimpleValidateQueryIT method testExplainValidateQueryTwoNodes.
public void testExplainValidateQueryTwoNodes() throws IOException {
    createIndex("test");
    ensureGreen();
    client().admin().indices().preparePutMapping("test").setType("type1")
            .setSource(XContentFactory.jsonBuilder()
                .startObject().startObject("type1").startObject("properties")
                    .startObject("foo").field("type", "text").endObject()
                    .startObject("bar").field("type", "integer").endObject()
                    .startObject("baz").field("type", "text").field("analyzer", "snowball").endObject()
                    .startObject("pin").startObject("properties")
                        .startObject("location").field("type", "geo_point").endObject()
                    .endObject().endObject()
                .endObject().endObject().endObject())
            .execute().actionGet();
    refresh();
    // Raw bytes that are not a parseable query should fail validation on every node.
    for (Client client : internalCluster().getClients()) {
        ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
                .setQuery(QueryBuilders.wrapperQuery("foo".getBytes(StandardCharsets.UTF_8)))
                .setExplain(true)
                .execute().actionGet();
        assertThat(response.isValid(), equalTo(false));
        assertThat(response.getQueryExplanation().size(), equalTo(1));
        assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to derive xcontent"));
        assertThat(response.getQueryExplanation().get(0).getExplanation(), nullValue());
    }
    // A valid query should produce the same explanation regardless of which node answers.
    for (Client client : internalCluster().getClients()) {
        ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
                .setQuery(QueryBuilders.queryStringQuery("foo"))
                .setExplain(true)
                .execute().actionGet();
        assertThat(response.isValid(), equalTo(true));
        assertThat(response.getQueryExplanation().size(), equalTo(1));
        assertThat(response.getQueryExplanation().get(0).getExplanation(), equalTo("(foo:foo | baz:foo)"));
        assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
    }
}
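The first loop intentionally sends raw bytes that are not parseable JSON, hence the "Failed to derive xcontent" error. For contrast, a hedged fragment (reusing the client variable from the loop above) showing a well-formed wrapper query, whose payload is the JSON of the embedded query itself:

// Sketch: the same validate-query call, but with valid JSON for the wrapper.
String json = "{\"query_string\": {\"query\": \"foo\"}}";
ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
        .setQuery(QueryBuilders.wrapperQuery(json))
        .setExplain(true)
        .execute().actionGet();
// Expected to validate successfully, equivalent to queryStringQuery("foo").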