Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
From the class IndicesRequestIT, method testSearchQueryThenFetch.
public void testSearchQueryThenFetch() throws Exception {
    interceptTransportActions(SearchTransportService.QUERY_ACTION_NAME,
            SearchTransportService.FETCH_ID_ACTION_NAME,
            SearchTransportService.FREE_CONTEXT_ACTION_NAME);
    String[] randomIndicesOrAliases = randomIndicesOrAliases();
    for (int i = 0; i < randomIndicesOrAliases.length; i++) {
        client().prepareIndex(randomIndicesOrAliases[i], "type", "id-" + i).setSource("field", "value").get();
    }
    refresh();
    SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.QUERY_THEN_FETCH);
    SearchResponse searchResponse = internalCluster().coordOnlyNodeClient().search(searchRequest).actionGet();
    assertNoFailures(searchResponse);
    assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L));
    clearInterceptedActions();
    assertSameIndices(searchRequest, SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.FETCH_ID_ACTION_NAME);
    // free context messages are not necessarily sent, but if they are, check their indices
    assertSameIndicesOptionalRequests(searchRequest, SearchTransportService.FREE_CONTEXT_ACTION_NAME);
}
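Stripped of the test harness, the same call sequence boils down to building a SearchRequest, forcing the QUERY_THEN_FETCH search type, and reading the hits off the SearchResponse. The sketch below is not from the test suite: the Client named client and the index "my-index" are placeholders, the imports use the unshaded package layout (under the Graylog shading they would carry the org.graylog.shaded.elasticsearch7 prefix), and it follows the same pre-7.x client API as the snippet above, where getTotalHits() returns a long.

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;

public class QueryThenFetchSketch {
    // Minimal sketch: run a QUERY_THEN_FETCH search and return the total hit count.
    public static long totalHits(Client client) {
        SearchRequest searchRequest = new SearchRequest("my-index").searchType(SearchType.QUERY_THEN_FETCH);
        SearchResponse searchResponse = client.search(searchRequest).actionGet();
        // Same accessor as in the test above (pre-7.x client API).
        return searchResponse.getHits().getTotalHits();
    }
}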
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
From the class AsyncBulkByScrollActionTests, method testScrollDelay.
public void testScrollDelay() throws Exception {
    /*
     * Replace the thread pool with one that will save the delay sent for the command. We'll use that to check that we used a proper
     * delay for throttling.
     */
    AtomicReference<TimeValue> capturedDelay = new AtomicReference<>();
    AtomicReference<Runnable> capturedCommand = new AtomicReference<>();
    setupClient(new TestThreadPool(getTestName()) {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            capturedDelay.set(delay);
            capturedCommand.set(command);
            return null;
        }
    });
    DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction();
    action.setScroll(scrollId());
    // Set the base for the scroll to wait - this is added to the figure we calculate below
    firstSearchRequest.scroll(timeValueSeconds(10));
    // Set the throttle to 1 request per second to make the math simpler
    testTask.rethrottle(1f);
    // Make the last batch look nearly instant but have 100 documents
    action.startNextScroll(timeValueNanos(System.nanoTime()), 100);
    // So the next request is going to have to wait an extra 100 seconds or so (the base was 10 seconds, so 110ish)
    assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L)));
    // Now we can simulate a response and check the delay that we used for the task
    SearchHit hit = new SearchHit(0, "id", new Text("type"), emptyMap());
    SearchHits hits = new SearchHits(new SearchHit[] { hit }, 0, 0);
    InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false, 1);
    SearchResponse searchResponse = new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), null);
    if (randomBoolean()) {
        client.lastScroll.get().listener.onResponse(searchResponse);
        // The delay is still 100ish seconds because there hasn't been much time between when we requested the bulk and when we got it
        assertThat(capturedDelay.get().seconds(), either(equalTo(100L)).or(equalTo(99L)));
    } else {
        // Rethrottle between starting the scroll and getting the response
        testTask.rethrottle(10f);
        client.lastScroll.get().listener.onResponse(searchResponse);
        // The delay uses the new throttle
        assertThat(capturedDelay.get().seconds(), either(equalTo(10L)).or(equalTo(9L)));
    }
    // Running the command ought to increment the delay counter on the task.
    capturedCommand.get().run();
    assertEquals(capturedDelay.get(), testTask.getStatus().getThrottled());
}
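The assertions encode simple throttling arithmetic: the last batch contained 100 documents and the throttle is 1 request per second, so the next scroll has to wait roughly 100 seconds; the scroll keepAlive adds the 10-second base on top (hence 110ish), and rethrottling to 10 requests per second shrinks the pending delay to roughly 10 seconds. The plain-Java sketch below only reproduces that arithmetic; the class and method names are illustrative and not part of AsyncBulkByScrollAction.

public class ScrollDelayMath {
    // Delay before the next scroll: documents in the last batch divided by the throttle.
    static long throttleDelaySeconds(int lastBatchSize, float requestsPerSecond) {
        return (long) (lastBatchSize / requestsPerSecond);
    }

    public static void main(String[] args) {
        long baseKeepAliveSeconds = 10;                                  // firstSearchRequest.scroll(timeValueSeconds(10))
        long delayAtOnePerSecond = throttleDelaySeconds(100, 1f);        // 100 docs / 1 rps  = 100 s
        long delayAtTenPerSecond = throttleDelaySeconds(100, 10f);       // 100 docs / 10 rps = 10 s
        System.out.println(baseKeepAliveSeconds + delayAtOnePerSecond);  // ~110: the expected scroll keepAlive
        System.out.println(delayAtOnePerSecond);                         // ~100: the captured delay without rethrottling
        System.out.println(delayAtTenPerSecond);                         // ~10: the captured delay after rethrottle(10f)
    }
}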
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
From the class BulkWithUpdatesIT, method testThatMissingIndexDoesNotAbortFullBulkRequest.
// issue 6410
public void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception {
    createIndex("bulkindex1", "bulkindex2");
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(new IndexRequest("bulkindex1", "index1_type", "1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo1"))
            .add(new IndexRequest("bulkindex2", "index2_type", "1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2"))
            .add(new IndexRequest("bulkindex2", "index2_type").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2"))
            .add(new UpdateRequest("bulkindex2", "index2_type", "2").doc(Requests.INDEX_CONTENT_TYPE, "foo", "bar"))
            .add(new DeleteRequest("bulkindex2", "index2_type", "3"))
            .setRefreshPolicy(RefreshPolicy.IMMEDIATE);
    client().bulk(bulkRequest).get();
    SearchResponse searchResponse = client().prepareSearch("bulkindex*").get();
    assertHitCount(searchResponse, 3);
    assertAcked(client().admin().indices().prepareClose("bulkindex2"));
    BulkResponse bulkResponse = client().bulk(bulkRequest).get();
    assertThat(bulkResponse.hasFailures(), is(true));
    assertThat(bulkResponse.getItems().length, is(5));
}
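The point of the test is that a missing or closed index fails only the affected bulk items instead of aborting the whole request, so callers are expected to walk the BulkResponse item by item. Below is a minimal sketch of that pattern, not from the test suite: the Client named client and the pre-built BulkRequest are placeholders, and the imports again use the unshaded package layout.

import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;

public class BulkFailureSketch {
    // Prints one line per failed item; successful items are untouched, mirroring the
    // behaviour the test asserts (5 items, some failed, bulk not aborted).
    public static void reportFailures(Client client, BulkRequest bulkRequest) throws Exception {
        BulkResponse bulkResponse = client.bulk(bulkRequest).get();
        if (bulkResponse.hasFailures()) {
            for (BulkItemResponse item : bulkResponse) {
                if (item.isFailed()) {
                    System.err.println(item.getIndex() + "/" + item.getOpType() + ": " + item.getFailureMessage());
                }
            }
        }
    }
}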
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
From the class BulkWithUpdatesIT, method testBulkIndexingWhileInitializing.
public void testBulkIndexingWhileInitializing() throws Exception {
    int replica = randomInt(2);
    internalCluster().ensureAtLeastNumDataNodes(1 + replica);
    assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("index.number_of_replicas", replica)));
    int numDocs = scaledRandomIntBetween(100, 5000);
    int bulk = scaledRandomIntBetween(1, 99);
    for (int i = 0; i < numDocs; ) {
        final BulkRequestBuilder builder = client().prepareBulk();
        for (int j = 0; j < bulk && i < numDocs; j++, i++) {
            builder.add(client().prepareIndex("test", "type1", Integer.toString(i)).setSource("val", i));
        }
        logger.info("bulk indexing {}-{}", i - bulk, i - 1);
        BulkResponse response = builder.get();
        if (response.hasFailures()) {
            fail(response.buildFailureMessage());
        }
    }
    refresh();
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
}
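Stripped of the assertions, the core of this example is the chunking loop: fill a BulkRequestBuilder up to a batch size, execute it, and surface buildFailureMessage() if anything failed. A minimal sketch of that loop under the same pre-7.x client API, with the Client passed in and the index/type names taken from the test; the class and method names are illustrative only.

import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;

public class ChunkedBulkSketch {
    // Indexes numDocs documents into "test" in batches of batchSize, failing fast on any item error.
    public static void indexInBatches(Client client, int numDocs, int batchSize) {
        for (int i = 0; i < numDocs; ) {
            BulkRequestBuilder builder = client.prepareBulk();
            for (int j = 0; j < batchSize && i < numDocs; j++, i++) {
                builder.add(client.prepareIndex("test", "type1", Integer.toString(i)).setSource("val", i));
            }
            BulkResponse response = builder.get();
            if (response.hasFailures()) {
                throw new IllegalStateException(response.buildFailureMessage());
            }
        }
    }
}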
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
From the class BulkWithUpdatesIT, method testBulkUpdateDocAsUpsertWithParent.
/*
 * Test for https://github.com/elastic/elasticsearch/issues/3444
 */
public void testBulkUpdateDocAsUpsertWithParent() throws Exception {
    client().admin().indices().prepareCreate("test")
            .addMapping("parent", "{\"parent\":{}}", XContentType.JSON)
            .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON)
            .execute().actionGet();
    ensureGreen();
    BulkRequestBuilder builder = client().prepareBulk();
    // It's important to use JSON parsing here and request objects: issue 3444 is related to incomplete option parsing
    byte[] addParent = new BytesArray("{" + " \"index\" : {" + " \"_index\" : \"test\"," + " \"_type\" : \"parent\"," +
            " \"_id\" : \"parent1\"" + " }" + "}" + "\n" + "{" + " \"field1\" : \"value1\"" + "}" + "\n").array();
    byte[] addChild = new BytesArray("{" + " \"update\" : {" + " \"_index\" : \"test\"," + " \"_type\" : \"child\"," +
            " \"_id\" : \"child1\"," + " \"parent\" : \"parent1\"" + " }" + "}" + "\n" + "{" + " \"doc\" : {" +
            " \"field1\" : \"value1\"" + " }," + " \"doc_as_upsert\" : \"true\"" + "}" + "\n").array();
    builder.add(addParent, 0, addParent.length, XContentType.JSON);
    builder.add(addChild, 0, addChild.length, XContentType.JSON);
    BulkResponse bulkResponse = builder.get();
    assertThat(bulkResponse.getItems().length, equalTo(2));
    assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
    assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
    client().admin().indices().prepareRefresh("test").get();
    // we check that the _parent field was set on the child document by using the has parent query
    SearchResponse searchResponse = client().prepareSearch("test")
            .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery(), false))
            .get();
    assertNoFailures(searchResponse);
    assertSearchHits(searchResponse, "child1");
}
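The verification step is the interesting part for SearchResponse: the has_parent query only matches children whose _parent field was actually set, so the hit ids tell you which children were joined. A minimal sketch of that check, using the same QueryBuilders.hasParentQuery call and pre-7.x client API as the test; the Client named client is a placeholder, and the index and parent type names are taken from the test.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;

public class HasParentSketch {
    // Returns true if at least one child document is joined to a "parent" document,
    // which is what the assertion on "child1" above verifies.
    public static boolean hasJoinedChildren(Client client) {
        SearchResponse searchResponse = client.prepareSearch("test")
                .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery(), false))
                .get();
        for (SearchHit hit : searchResponse.getHits()) {
            System.out.println("child joined to a parent: " + hit.getId());
        }
        return searchResponse.getHits().getTotalHits() > 0;
    }
}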