
Example 31 with SearchPhaseExecutionException

Use of org.elasticsearch.action.search.SearchPhaseExecutionException in project elasticsearch by elastic.

From the class ElasticsearchExceptionTests, method randomExceptions.

public static Tuple<Throwable, ElasticsearchException> randomExceptions() {
    Throwable actual;
    ElasticsearchException expected;
    int type = randomIntBetween(0, 5);
    switch(type) {
        case 0:
            actual = new ClusterBlockException(singleton(DiscoverySettings.NO_MASTER_BLOCK_WRITES));
            expected = new ElasticsearchException("Elasticsearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]");
            break;
        case 1:
            actual = new CircuitBreakingException("Data too large", 123, 456);
            expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=Data too large]");
            break;
        case 2:
            actual = new SearchParseException(new TestSearchContext(null), "Parse failure", new XContentLocation(12, 98));
            expected = new ElasticsearchException("Elasticsearch exception [type=search_parse_exception, reason=Parse failure]");
            break;
        case 3:
            actual = new IllegalArgumentException("Closed resource", new RuntimeException("Resource"));
            expected = new ElasticsearchException("Elasticsearch exception [type=illegal_argument_exception, reason=Closed resource]", new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=Resource]"));
            break;
        case 4:
            actual = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] { new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), new SearchShardTarget("node_1", new Index("foo", "_na_"), 1)) });
            expected = new ElasticsearchException("Elasticsearch exception [type=search_phase_execution_exception, " + "reason=all shards failed]");
            expected.addMetadata("es.phase", "search");
            break;
        case 5:
            actual = new ElasticsearchException("Parsing failed", new ParsingException(9, 42, "Wrong state", new NullPointerException("Unexpected null value")));
            ElasticsearchException expectedCause = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, " + "reason=Wrong state]", new ElasticsearchException("Elasticsearch exception [type=null_pointer_exception, " + "reason=Unexpected null value]"));
            expected = new ElasticsearchException("Elasticsearch exception [type=exception, reason=Parsing failed]", expectedCause);
            break;
        default:
            throw new UnsupportedOperationException("No randomized exceptions generated for type [" + type + "]");
    }
    if (actual instanceof ElasticsearchException) {
        ElasticsearchException actualException = (ElasticsearchException) actual;
        if (randomBoolean()) {
            int nbHeaders = randomIntBetween(1, 5);
            Map<String, List<String>> randomHeaders = new HashMap<>(nbHeaders);
            for (int i = 0; i < nbHeaders; i++) {
                List<String> values = new ArrayList<>();
                int nbValues = randomIntBetween(1, 3);
                for (int j = 0; j < nbValues; j++) {
                    values.add(frequently() ? randomAsciiOfLength(5) : "");
                }
                randomHeaders.put("header_" + i, values);
            }
            for (Map.Entry<String, List<String>> entry : randomHeaders.entrySet()) {
                actualException.addHeader(entry.getKey(), entry.getValue());
                expected.addHeader(entry.getKey(), entry.getValue());
            }
            if (rarely()) {
                // Empty or null headers are not printed out by the toXContent method
                actualException.addHeader("ignored", randomBoolean() ? emptyList() : null);
            }
        }
        if (randomBoolean()) {
            int nbMetadata = randomIntBetween(1, 5);
            Map<String, List<String>> randomMetadata = new HashMap<>(nbMetadata);
            for (int i = 0; i < nbMetadata; i++) {
                List<String> values = new ArrayList<>();
                int nbValues = randomIntBetween(1, 3);
                for (int j = 0; j < nbValues; j++) {
                    values.add(frequently() ? randomAsciiOfLength(5) : "");
                }
                randomMetadata.put("es.metadata_" + i, values);
            }
            for (Map.Entry<String, List<String>> entry : randomMetadata.entrySet()) {
                actualException.addMetadata(entry.getKey(), entry.getValue());
                expected.addMetadata(entry.getKey(), entry.getValue());
            }
            if (rarely()) {
                // Empty or null metadata are not printed out by the toXContent method
                actualException.addMetadata("es.ignored", randomBoolean() ? emptyList() : null);
            }
        }
        if (randomBoolean()) {
            int nbResources = randomIntBetween(1, 5);
            for (int i = 0; i < nbResources; i++) {
                String resourceType = "type_" + i;
                String[] resourceIds = new String[randomIntBetween(1, 3)];
                for (int j = 0; j < resourceIds.length; j++) {
                    resourceIds[j] = frequently() ? randomAsciiOfLength(5) : "";
                }
                actualException.setResources(resourceType, resourceIds);
                expected.setResources(resourceType, resourceIds);
            }
        }
    }
    return new Tuple<>(actual, expected);
}
Also used : HashMap(java.util.HashMap) SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) ArrayList(java.util.ArrayList) Index(org.elasticsearch.index.Index) ParsingException(org.elasticsearch.common.ParsingException) Collections.singletonList(java.util.Collections.singletonList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ArrayList(java.util.ArrayList) ShardSearchFailure(org.elasticsearch.action.search.ShardSearchFailure) XContentLocation(org.elasticsearch.common.xcontent.XContentLocation) ClusterBlockException(org.elasticsearch.cluster.block.ClusterBlockException) TestSearchContext(org.elasticsearch.test.TestSearchContext) SearchParseException(org.elasticsearch.search.SearchParseException) CircuitBreakingException(org.elasticsearch.common.breaker.CircuitBreakingException) SearchShardTarget(org.elasticsearch.search.SearchShardTarget) Map(java.util.Map) HashMap(java.util.HashMap) Tuple(org.elasticsearch.common.collect.Tuple)
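
For orientation, here is a minimal sketch of what case 4 above constructs, using the same types and imports as the test; it is not part of ElasticsearchExceptionTests, and the assertions are only illustrative:

// Build the same exception as case 4 and inspect it: the phase name and the
// per-shard failures stay accessible on the thrown SearchPhaseExecutionException.
ShardSearchFailure failure = new ShardSearchFailure(
        new ParsingException(1, 2, "foobar", null),
        new SearchShardTarget("node_1", new Index("foo", "_na_"), 1));
SearchPhaseExecutionException e =
        new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] { failure });
assert "search".equals(e.getPhaseName());
assert e.shardFailures().length == 1;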

Example 32 with SearchPhaseExecutionException

Use of org.elasticsearch.action.search.SearchPhaseExecutionException in project elasticsearch by elastic.

From the class RandomExceptionCircuitBreakerIT, method testBreakerWithRandomExceptions.

public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException {
    for (NodeStats node : client().admin().cluster().prepareNodesStats().clear().setBreaker(true).execute().actionGet().getNodes()) {
        assertThat("Breaker is not set to 0", node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
    }
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test-str").field("type", "keyword").field("doc_values", randomBoolean()).endObject().startObject("test-num").field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer"))).endObject().endObject().endObject().endObject().string();
    final double topLevelRate;
    final double lowLevelRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                lowLevelRate = 1.0 / between(2, 10);
                topLevelRate = 0.0d;
            } else {
                topLevelRate = 1.0 / between(2, 10);
                lowLevelRate = 0.0d;
            }
        } else {
            lowLevelRate = 1.0 / between(2, 10);
            topLevelRate = 1.0 / between(2, 10);
        }
    } else {
        // rarely no exception
        topLevelRate = 0d;
        lowLevelRate = 0d;
    }
    Settings.Builder settings = Settings.builder().put(indexSettings()).put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate).put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate).put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d);
    logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
    CreateIndexResponse response = client().admin().indices().prepareCreate("test").setSettings(settings).addMapping("type", mapping, XContentType.JSON).execute().actionGet();
    final int numDocs;
    if (response.isShardsAcked() == false) {
        /* some seeds just won't let you create the index at all and we enter a ping-pong mode
         * trying one node after another etc. that is ok but we need to make sure we don't wait
         * forever when indexing documents so we set numDocs = 1 and expect all shards to fail
         * when we search below. */
        if (response.isAcknowledged()) {
            logger.info("Index creation timed out waiting for primaries to start - only index one doc and expect searches to fail");
        } else {
            logger.info("Index creation failed - only index one doc and expect searches to fail");
        }
        numDocs = 1;
    } else {
        numDocs = between(10, 100);
    }
    for (int i = 0; i < numDocs; i++) {
        try {
            client().prepareIndex("test", "type", "" + i).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i).get();
        } catch (ElasticsearchException ex) {
            // ignored on purpose: the randomly injected exceptions may fail individual index requests
        }
    }
    logger.info("Start Refresh");
    // don't assert on failures here
    RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
    final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info("Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
    final int numSearches = scaledRandomIntBetween(50, 150);
    NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).execute().actionGet();
    for (NodeStats stats : resp.getNodes()) {
        assertThat("Breaker is set to 0", stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
    }
    for (int i = 0; i < numSearches; i++) {
        SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery());
        if (random().nextBoolean()) {
            searchRequestBuilder.addSort("test-str", SortOrder.ASC);
        }
        searchRequestBuilder.addSort("test-num", SortOrder.ASC);
        boolean success = false;
        try {
            // Sort by the string and numeric fields, to load them into field data
            searchRequestBuilder.get();
            success = true;
        } catch (SearchPhaseExecutionException ex) {
            logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
        }
        if (frequently()) {
            // Now, clear the cache and check that the circuit breaker has been
            // successfully set back to zero. If there is a bug in the circuit
            // breaker adjustment code, it should show up here by the breaker
            // estimate being either positive or negative.
            // make sure all shards are there - there could be shards that are still starting up.
            ensureGreen("test");
            assertAllSuccessful(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).execute().actionGet());
            // Since .cleanUp() is no longer called on cache clear, we need to call it on each node manually
            for (String node : internalCluster().getNodeNames()) {
                final IndicesFieldDataCache fdCache = internalCluster().getInstance(IndicesService.class, node).getIndicesFieldDataCache();
                // Clean up the cache, ensuring that entries' listeners have been called
                fdCache.getCache().refresh();
            }
            NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).execute().actionGet();
            for (NodeStats stats : nodeStats.getNodes()) {
                assertThat("Breaker reset to 0 last search success: " + success + " mapping: " + mapping, stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
            }
        }
    }
}
Also used : SearchRequestBuilder(org.elasticsearch.action.search.SearchRequestBuilder) SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) IndicesService(org.elasticsearch.indices.IndicesService) ElasticsearchException(org.elasticsearch.ElasticsearchException) NodesStatsResponse(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse) NodeStats(org.elasticsearch.action.admin.cluster.node.stats.NodeStats) IndicesFieldDataCache(org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache) RefreshResponse(org.elasticsearch.action.admin.indices.refresh.RefreshResponse) CreateIndexResponse(org.elasticsearch.action.admin.indices.create.CreateIndexResponse) Settings(org.elasticsearch.common.settings.Settings)
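
The recurring breaker check in this test can be condensed as follows; this sketch only reuses calls already shown above and is not itself part of RandomExceptionCircuitBreakerIT:

// After clearing the field data cache, the FIELDDATA breaker estimate on every
// node is expected to be back at zero.
NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).execute().actionGet();
for (NodeStats node : nodesStats.getNodes()) {
    long estimated = node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated();
    assertThat("field data breaker should be reset to 0", estimated, equalTo(0L));
}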

Example 33 with SearchPhaseExecutionException

Use of org.elasticsearch.action.search.SearchPhaseExecutionException in project elasticsearch by elastic.

From the class SearchWhileRelocatingIT, method testSearchAndRelocateConcurrently.

private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throws Exception {
    final int numShards = between(1, 20);
    client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", numShards).put("index.number_of_replicas", numberOfReplicas)).addMapping("type", "loc", "type=geo_point", "test", "type=text").execute().actionGet();
    ensureGreen();
    List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
    final int numDocs = between(10, 20);
    for (int i = 0; i < numDocs; i++) {
        indexBuilders.add(client().prepareIndex("test", "type", Integer.toString(i)).setSource(jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21).endObject().endObject()));
    }
    indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]));
    assertHitCount(client().prepareSearch().get(), (numDocs));
    final int numIters = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIters; i++) {
        final AtomicBoolean stop = new AtomicBoolean(false);
        final List<String> nonCriticalExceptions = new CopyOnWriteArrayList<>();
        Thread[] threads = new Thread[scaledRandomIntBetween(1, 3)];
        for (int j = 0; j < threads.length; j++) {
            threads[j] = new Thread() {

                @Override
                public void run() {
                    try {
                        while (!stop.get()) {
                            SearchResponse sr = client().prepareSearch().setSize(numDocs).get();
                            if (sr.getHits().getTotalHits() != numDocs) {
                                // The hit count can come up short when the cluster state is behind as the
                                // request comes in. It's a small window but a known limitation.
                                if (sr.getTotalShards() != sr.getSuccessfulShards() && sr.getFailedShards() == 0) {
                                    nonCriticalExceptions.add("Count is " + sr.getHits().getTotalHits() + " but " + numDocs + " was expected. " + formatShardStatus(sr));
                                } else {
                                    assertHitCount(sr, numDocs);
                                }
                            }
                            final SearchHits sh = sr.getHits();
                            assertThat("Expected hits to be the same size the actual hits array", sh.getTotalHits(), equalTo((long) (sh.getHits().length)));
                        // this is the more critical but that we hit the actual hit array has a different size than the
                        // actual number of hits.
                        }
                    } catch (SearchPhaseExecutionException ex) {
                        // with replicas this should not happen
                        if (numberOfReplicas == 1 || !ex.getMessage().contains("all shards failed")) {
                            throw ex;
                        }
                    }
                }
            };
        }
        for (int j = 0; j < threads.length; j++) {
            threads[j].start();
        }
        allowNodes("test", between(1, 3));
        client().admin().cluster().prepareReroute().get();
        stop.set(true);
        for (int j = 0; j < threads.length; j++) {
            threads[j].join();
        }
        // this might time out on some machines if they are really busy and you hit lots of throttling
        ClusterHealthResponse resp = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).setTimeout("5m").get();
        assertNoTimeout(resp);
        // if we hit only non-critical exceptions we make sure that the post search works
        if (!nonCriticalExceptions.isEmpty()) {
            logger.info("non-critical exceptions: {}", nonCriticalExceptions);
            for (int j = 0; j < 10; j++) {
                assertHitCount(client().prepareSearch().get(), numDocs);
            }
        }
    }
}
Also used : ClusterHealthResponse(org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse) SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) ArrayList(java.util.ArrayList) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) SearchResponse(org.elasticsearch.action.search.SearchResponse) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) SearchHits(org.elasticsearch.search.SearchHits) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList)

Example 34 with SearchPhaseExecutionException

Use of org.elasticsearch.action.search.SearchPhaseExecutionException in project elasticsearch by elastic.

From the class SearchWithRandomExceptionsIT, method testRandomExceptions.

public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException {
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "keyword").endObject().endObject().endObject().endObject().string();
    final double lowLevelRate;
    final double topLevelRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                lowLevelRate = 1.0 / between(2, 10);
                topLevelRate = 0.0d;
            } else {
                topLevelRate = 1.0 / between(2, 10);
                lowLevelRate = 0.0d;
            }
        } else {
            lowLevelRate = 1.0 / between(2, 10);
            topLevelRate = 1.0 / between(2, 10);
        }
    } else {
        // rarely no exception
        topLevelRate = 0d;
        lowLevelRate = 0d;
    }
    Builder settings = Settings.builder().put(indexSettings()).put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate).put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate).put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d);
    logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
    assertAcked(prepareCreate("test").setSettings(settings).addMapping("type", mapping, XContentType.JSON));
    ensureSearchable();
    final int numDocs = between(10, 100);
    int numCreated = 0;
    boolean[] added = new boolean[numDocs];
    for (int i = 0; i < numDocs; i++) {
        try {
            IndexResponse indexResponse = client().prepareIndex("test", "type", "" + i).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
            if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
                numCreated++;
                added[i] = true;
            }
        } catch (ElasticsearchException ex) {
            // ignored on purpose: the randomly injected exceptions may fail individual index requests
        }
    }
    logger.info("Start Refresh");
    // don't assert on failures here
    RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
    final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
    NumShards test = getNumShards("test");
    final int numSearches = scaledRandomIntBetween(100, 200);
    // we don't check anything here really just making sure we don't leave any open files or a broken index behind.
    for (int i = 0; i < numSearches; i++) {
        try {
            int docToQuery = between(0, numDocs - 1);
            int expectedResults = added[docToQuery] ? 1 : 0;
            logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
            SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults).get();
            logger.info("Successful shards: [{}]  numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
            if (searchResponse.getSuccessfulShards() == test.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(expectedResults, searchResponse);
            }
            // check match all
            searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSize(numCreated).addSort("_id", SortOrder.ASC).get();
            logger.info("Match all Successful shards: [{}]  numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
            if (searchResponse.getSuccessfulShards() == test.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(numCreated, searchResponse);
            }
        } catch (SearchPhaseExecutionException ex) {
            logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
        }
    }
}
Also used : RefreshResponse(org.elasticsearch.action.admin.indices.refresh.RefreshResponse) IndexResponse(org.elasticsearch.action.index.IndexResponse) Builder(org.elasticsearch.common.settings.Settings.Builder) SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) ElasticsearchException(org.elasticsearch.ElasticsearchException) SearchResponse(org.elasticsearch.action.search.SearchResponse)

Example 35 with SearchPhaseExecutionException

Use of org.elasticsearch.action.search.SearchPhaseExecutionException in project elasticsearch by elastic.

From the class ElasticsearchAssertions, method assertFailures.

public static void assertFailures(SearchRequestBuilder searchRequestBuilder, RestStatus restStatus, Matcher<String> reasonMatcher) {
    // we can either run into partial or total failures depending on the current number of shards
    try {
        SearchResponse searchResponse = searchRequestBuilder.get();
        assertThat("Expected shard failures, got none", searchResponse.getShardFailures().length, greaterThan(0));
        for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
            assertThat(shardSearchFailure.status(), equalTo(restStatus));
            assertThat(shardSearchFailure.reason(), reasonMatcher);
        }
        assertVersionSerializable(searchResponse);
    } catch (SearchPhaseExecutionException e) {
        assertThat(e.status(), equalTo(restStatus));
        assertThat(e.toString(), reasonMatcher);
        for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
            assertThat(shardSearchFailure.status(), equalTo(restStatus));
            assertThat(shardSearchFailure.reason(), reasonMatcher);
        }
    } catch (Exception e) {
        fail("SearchPhaseExecutionException expected but got " + e.getClass());
    }
}
Also used : SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) ShardSearchFailure(org.elasticsearch.action.search.ShardSearchFailure) ElasticsearchException(org.elasticsearch.ElasticsearchException) SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) InvocationTargetException(java.lang.reflect.InvocationTargetException) ClusterBlockException(org.elasticsearch.cluster.block.ClusterBlockException) IOException(java.io.IOException) ShardOperationFailedException(org.elasticsearch.action.ShardOperationFailedException) SearchResponse(org.elasticsearch.action.search.SearchResponse)
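
A hypothetical caller of assertFailures might look like the snippet below; the index name, query, expected status and message matcher are illustrative assumptions, not taken from the Elasticsearch test suite:

// Expect every shard to reject the request with 400 Bad Request and a parse error.
assertFailures(client().prepareSearch("test")
                .setQuery(QueryBuilders.rangeQuery("date").from("not-a-date")),
        RestStatus.BAD_REQUEST,
        containsString("failed to parse"));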

Aggregations

SearchPhaseExecutionException (org.elasticsearch.action.search.SearchPhaseExecutionException): 43
SearchResponse (org.elasticsearch.action.search.SearchResponse): 18
ShardSearchFailure (org.elasticsearch.action.search.ShardSearchFailure): 10
Timed (com.codahale.metrics.annotation.Timed): 6
ApiOperation (io.swagger.annotations.ApiOperation): 6
ApiResponses (io.swagger.annotations.ApiResponses): 6
IOException (java.io.IOException): 6
GET (javax.ws.rs.GET): 6
Produces (javax.ws.rs.Produces): 6
TimeRange (org.graylog2.plugin.indexer.searches.timeranges.TimeRange): 6
ArrayList (java.util.ArrayList): 5
ElasticsearchException (org.elasticsearch.ElasticsearchException): 5
SearchRequestBuilder (org.elasticsearch.action.search.SearchRequestBuilder): 5
Matchers.containsString (org.hamcrest.Matchers.containsString): 5
RefreshResponse (org.elasticsearch.action.admin.indices.refresh.RefreshResponse): 4
IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder): 4
TimeValue (org.elasticsearch.common.unit.TimeValue): 4
ElasticsearchAssertions.assertSearchResponse (org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse): 4
HashMap (java.util.HashMap): 2
List (java.util.List): 2