Use of org.graylog.shaded.elasticsearch6.org.elasticsearch.index.query.TermsQueryBuilder in project elasticsearch by elastic.
In the class ContextAndHeaderTransportIT, the method testThatTermsLookupGetRequestContainsContextAndHeaders:
public void testThatTermsLookupGetRequestContainsContextAndHeaders() throws Exception {
    transportClient().prepareIndex(lookupIndex, "type", "1")
            .setSource(jsonBuilder().startObject().array("followers", "foo", "bar", "baz").endObject())
            .get();
    transportClient().prepareIndex(queryIndex, "type", "1")
            .setSource(jsonBuilder().startObject().field("username", "foo").endObject())
            .get();
    transportClient().admin().indices().prepareRefresh(queryIndex, lookupIndex).get();
    TermsLookup termsLookup = new TermsLookup(lookupIndex, "type", "1", "followers");
    TermsQueryBuilder termsLookupFilterBuilder = QueryBuilders.termsLookupQuery("username", termsLookup);
    BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery()
            .must(QueryBuilders.matchAllQuery())
            .must(termsLookupFilterBuilder);
    SearchResponse searchResponse = transportClient().prepareSearch(queryIndex).setQuery(queryBuilder).get();
    assertNoFailures(searchResponse);
    assertHitCount(searchResponse, 1);
    assertGetRequestsContainHeaders();
}
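For context, the terms-lookup construction used above can be shown in isolation. This is a minimal sketch against the same query-builder API as the test; the "users" index name, document id, and field names are placeholders, not values from the test:

    // Fetch the allowed values for "username" from the "followers" field of
    // document "1" in a hypothetical "users" index, then match against them.
    TermsLookup lookup = new TermsLookup("users", "type", "1", "followers");
    TermsQueryBuilder query = QueryBuilders.termsLookupQuery("username", lookup);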
Use of org.graylog.shaded.elasticsearch6.org.elasticsearch.index.query.TermsQueryBuilder in project elasticsearch by elastic.
In the class ShrinkIndexIT, the method testCreateShrinkIndexFails:
/**
* Tests that we can manually recover from a failed allocation due to shards being moved away etc.
*/
public void testCreateShrinkIndexFails() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("source").setSettings(Settings.builder()
            .put(indexSettings())
            .put("number_of_shards", randomIntBetween(2, 7))
            .put("number_of_replicas", 0)).get();
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source", randomFrom("t1", "t2", "t3"))
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes =
            client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String spareNode = discoveryNodes[0].getName();
    String mergeNode = discoveryNodes[1].getName();
    // Ensure all shards are allocated, otherwise the ensureGreen() below might not succeed, since we
    // require the merge node. If we change the setting too quickly, we end up with one replica
    // unassigned which can't be assigned anymore due to the require._name below.
    ensureGreen();
    // Relocate all shards to one node so that we can shrink it.
    client().admin().indices().prepareUpdateSettings("source")
            .setSettings(Settings.builder()
                    .put("index.routing.allocation.require._name", mergeNode)
                    .put("index.blocks.write", true)).get();
    ensureGreen();
    // Now merge the source into a single-shard index. We manually exclude the merge node to force
    // the allocation to fail.
    client().admin().indices().prepareShrinkIndex("source", "target")
            .setWaitForActiveShards(ActiveShardCount.NONE)
            .setSettings(Settings.builder()
                    .put("index.routing.allocation.exclude._name", mergeNode)
                    .put("index.number_of_replicas", 0)
                    .put("index.allocation.max_retries", 1).build()).get();
    client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get();
    // Now we move all shards away from the merge node.
    client().admin().indices().prepareUpdateSettings("source")
            .setSettings(Settings.builder()
                    .put("index.routing.allocation.require._name", spareNode)
                    .put("index.blocks.write", true)).get();
    ensureGreen("source");
    // Remove the forced allocation failure.
    client().admin().indices().prepareUpdateSettings("target")
            .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")).get();
    // Wait until the allocation fails.
    assertBusy(() -> {
        ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
        RoutingTable routingTables = clusterStateResponse.getState().routingTable();
        assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned());
        assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED,
                routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason());
        assertEquals(1,
                routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());
    });
    // Now relocate all shards to the right node.
    client().admin().indices().prepareUpdateSettings("source")
            .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode)).get();
    ensureGreen("source");
    final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster()
            .getInstance(ClusterInfoService.class, internalCluster().getMasterName());
    infoService.refresh();
    // Kick off a retry and wait until it's done.
    ClusterRerouteResponse clusterRerouteResponse =
            client().admin().cluster().prepareReroute().setRetryFailed(true).get();
    long expectedShardSize = clusterRerouteResponse.getState().routingTable()
            .index("target").shard(0).getShards().get(0).getExpectedShardSize();
    // The allocator sets the expected shard size by summing over the source index shards.
    assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);
    ensureGreen();
    assertHitCount(client().prepareSearch("target").setSize(100)
            .setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}
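Stripped of the failure injection, the shrink call itself is short. A minimal sketch using the same admin-client API as the test, assuming "source" is already write-blocked and fully allocated on a single node:

    // Shrink "source" into a single-shard "target" index; the prerequisites
    // (write block, single-node allocation) must already be in place.
    client().admin().indices().prepareShrinkIndex("source", "target")
            .setSettings(Settings.builder().put("index.number_of_replicas", 0).build())
            .get();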
Use of org.graylog.shaded.elasticsearch6.org.elasticsearch.index.query.TermsQueryBuilder in project elasticsearch by elastic.
In the class ShrinkIndexIT, the method testCreateShrinkIndex:
public void testCreateShrinkIndex() {
    internalCluster().ensureAtLeastNumDataNodes(2);
    Version version = VersionUtils.randomVersion(random());
    prepareCreate("source").setSettings(Settings.builder()
            .put(indexSettings())
            .put("number_of_shards", randomIntBetween(2, 7))
            .put("index.version.created", version)).get();
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source", randomFrom("t1", "t2", "t3"))
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes =
            client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String mergeNode = discoveryNodes[0].getName();
    // Ensure all shards are allocated, otherwise the ensureGreen() below might not succeed, since we
    // require the merge node. If we change the setting too quickly, we end up with one replica
    // unassigned which can't be assigned anymore due to the require._name below.
    ensureGreen();
    // Relocate all shards to one node so that we can shrink it.
    client().admin().indices().prepareUpdateSettings("source")
            .setSettings(Settings.builder()
                    .put("index.routing.allocation.require._name", mergeNode)
                    .put("index.blocks.write", true)).get();
    ensureGreen();
    // Now merge the source into a single-shard index.
    final boolean createWithReplicas = randomBoolean();
    assertAcked(client().admin().indices().prepareShrinkIndex("source", "target")
            .setSettings(Settings.builder()
                    .put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get());
    ensureGreen();
    assertHitCount(client().prepareSearch("target").setSize(100)
            .setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    if (createWithReplicas == false) {
        // Bump replicas.
        client().admin().indices().prepareUpdateSettings("target")
                .setSettings(Settings.builder().put("index.number_of_replicas", 1)).get();
        ensureGreen();
        assertHitCount(client().prepareSearch("target").setSize(100)
                .setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    }
    for (int i = 20; i < 40; i++) {
        client().prepareIndex("target", randomFrom("t1", "t2", "t3"))
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    flushAndRefresh();
    assertHitCount(client().prepareSearch("target").setSize(100)
            .setQuery(new TermsQueryBuilder("foo", "bar")).get(), 40);
    assertHitCount(client().prepareSearch("source").setSize(100)
            .setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get();
    assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));
}
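The final assertion reads the created-version setting back through the settings API. A minimal standalone sketch of that lookup, using the same index name as the test above:

    // Read "index.version.created" back from the shrunken index.
    GetSettingsResponse resp = client().admin().indices().prepareGetSettings("target").get();
    Version created = resp.getIndexToSettings().get("target").getAsVersion("index.version.created", null);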
Use of org.graylog.shaded.elasticsearch6.org.elasticsearch.index.query.TermsQueryBuilder in project bw-calendar-engine by Bedework.
In the class BwIndexEsImpl, the method multiColFetch:
/* ========================================================================
* private methods
* ======================================================================== */
private SearchHits multiColFetch(final List<String> hrefs) throws CalFacadeException {
    final int batchSize = hrefs.size();
    final SearchRequestBuilder srb = getClient().prepareSearch(searchIndexes);
    final TermsQueryBuilder tqb =
            new TermsQueryBuilder(ESQueryFilter.getJname(PropertyInfoIndex.HREF), hrefs);
    srb.setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(tqb);
    srb.setFrom(0);
    srb.setSize(batchSize);
    if (debug) {
        debug("MultiColFetch: targetIndex=" + targetIndex + "; srb=" + srb);
    }
    final SearchResponse resp = srb.execute().actionGet();
    if (resp.status() != RestStatus.OK) {
        if (debug) {
            debug("Search returned status " + resp.status());
        }
        return null;
    }
    final SearchHits hits = resp.getHits();
    // Break condition: no hits are returned.
    if ((hits.getHits() == null) || (hits.getHits().length == 0)) {
        return null;
    }
    return hits;
}
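The batch fetch above boils down to a terms query over the href field, sized so the whole batch comes back in a single page. A minimal sketch, assuming a plain "href" field name in place of ESQueryFilter.getJname(PropertyInfoIndex.HREF):

    // Match any document whose "href" is one of the requested values, and set
    // from/size so every requested document fits in one response page.
    TermsQueryBuilder tqb = new TermsQueryBuilder("href", hrefs);
    SearchResponse resp = getClient().prepareSearch(searchIndexes)
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(tqb)
            .setFrom(0)
            .setSize(hrefs.size())
            .execute().actionGet();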
Use of org.graylog.shaded.elasticsearch6.org.elasticsearch.index.query.TermsQueryBuilder in project opencast by opencast.
In the class AbstractElasticsearchQueryBuilder, the method createQuery:
/**
* Create the actual query. We start with a query that matches everything, then move to the boolean conditions,
* finally add filter queries.
*/
private void createQuery() {
    queryBuilder = new MatchAllQueryBuilder();
    // The boolean query builder
    BoolQueryBuilder booleanQuery = new BoolQueryBuilder();
    // Terms
    if (searchTerms != null) {
        for (Map.Entry<String, Set<Object>> entry : searchTerms.entrySet()) {
            Set<Object> values = entry.getValue();
            if (values.size() == 1) {
                booleanQuery.must(new TermsQueryBuilder(entry.getKey(), values.iterator().next()));
            } else {
                booleanQuery.must(new TermsQueryBuilder(entry.getKey(), values.toArray(new String[values.size()])));
            }
        }
        this.queryBuilder = booleanQuery;
    }
    // Negative terms
    if (negativeSearchTerms != null) {
        for (Map.Entry<String, Set<Object>> entry : negativeSearchTerms.entrySet()) {
            Set<Object> values = entry.getValue();
            if (values.size() == 1) {
                booleanQuery.mustNot(new TermsQueryBuilder(entry.getKey(), values.iterator().next()));
            } else {
                booleanQuery.mustNot(new TermsQueryBuilder(entry.getKey(), values.toArray(new String[values.size()])));
            }
        }
        this.queryBuilder = booleanQuery;
    }
    // Date ranges
    if (dateRanges != null) {
        for (DateRange dr : dateRanges) {
            booleanQuery.must(dr.getQueryBuilder());
        }
        this.queryBuilder = booleanQuery;
    }
    // Text
    if (text != null) {
        QueryStringQueryBuilder queryBuilder = QueryBuilders.queryString(text).field(TEXT);
        booleanQuery.must(queryBuilder);
        this.queryBuilder = booleanQuery;
    }
    // Fuzzy text
    if (fuzzyText != null) {
        FuzzyLikeThisQueryBuilder fuzzyQueryBuilder = QueryBuilders.fuzzyLikeThisQuery(TEXT_FUZZY).likeText(fuzzyText);
        booleanQuery.must(fuzzyQueryBuilder);
        this.queryBuilder = booleanQuery;
    }
    QueryBuilder unfilteredQuery = queryBuilder;
    List<FilterBuilder> filters = new ArrayList<FilterBuilder>();
    // Add filtering for AND terms
    if (groups != null) {
        for (ValueGroup group : groups) {
            filters.addAll(group.getFilterBuilders());
        }
    }
    // Non-empty fields
    if (nonEmptyFields != null) {
        for (String field : nonEmptyFields) {
            filters.add(FilterBuilders.existsFilter(field));
        }
    }
    // Empty fields
    if (emptyFields != null) {
        for (String field : emptyFields) {
            filters.add(FilterBuilders.missingFilter(field));
        }
    }
    // Filter expressions
    if (filter != null) {
        filters.add(FilterBuilders.termFilter(IndexSchema.TEXT, filter));
    }
    // Apply the filters
    if (filters.size() == 1) {
        this.queryBuilder = QueryBuilders.filteredQuery(unfilteredQuery, filters.get(0));
    } else if (filters.size() > 1) {
        FilterBuilder andFilter = FilterBuilders.andFilter(filters.toArray(new FilterBuilder[filters.size()]));
        this.queryBuilder = QueryBuilders.filteredQuery(unfilteredQuery, andFilter);
    }
}
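Note the two TermsQueryBuilder forms the loops above rely on: a single value passed directly, or a whole array of values. A minimal sketch with hypothetical field names and values:

    BoolQueryBuilder bool = new BoolQueryBuilder();
    // Single value: the constructor accepts one object directly.
    bool.must(new TermsQueryBuilder("series", "astronomy"));
    // Several values: pass them as an array.
    bool.mustNot(new TermsQueryBuilder("tags", new String[] { "draft", "hidden" }));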