use of org.elasticsearch.index.query.QueryShardContext in project elasticsearch by elastic.
The class ScriptedMetricAggregationBuilder, method doBuild:
@Override
protected ScriptedMetricAggregatorFactory doBuild(SearchContext context, AggregatorFactory<?> parent,
        Builder subfactoriesBuilder) throws IOException {
    QueryShardContext queryShardContext = context.getQueryShardContext();
    // The init and combine scripts are optional; only the map script is required.
    Function<Map<String, Object>, ExecutableScript> executableInitScript;
    if (initScript != null) {
        executableInitScript = queryShardContext.getLazyExecutableScript(initScript, ScriptContext.Standard.AGGS);
    } else {
        executableInitScript = (p) -> null;
    }
    Function<Map<String, Object>, SearchScript> searchMapScript =
        queryShardContext.getLazySearchScript(mapScript, ScriptContext.Standard.AGGS);
    Function<Map<String, Object>, ExecutableScript> executableCombineScript;
    if (combineScript != null) {
        executableCombineScript = queryShardContext.getLazyExecutableScript(combineScript, ScriptContext.Standard.AGGS);
    } else {
        executableCombineScript = (p) -> null;
    }
    return new ScriptedMetricAggregatorFactory(name, searchMapScript, executableInitScript, executableCombineScript,
        reduceScript, params, context, parent, subfactoriesBuilder, metaData);
}
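For context, doBuild runs on the shard at search time; the scripts it resolves are supplied when the aggregation is built on the client. A minimal sketch of that client side, assuming the 5.x Java API (the aggregation name and script bodies are illustrative):

SearchSourceBuilder source = new SearchSourceBuilder();
source.aggregation(new ScriptedMetricAggregationBuilder("total_amount")
    // initScript and combineScript are optional, which is why doBuild() null-checks them
    .initScript(new Script("params._agg.amounts = []"))
    .mapScript(new Script("params._agg.amounts.add(doc['amount'].value)"))
    .combineScript(new Script("double total = 0; for (a in params._agg.amounts) { total += a } return total")));

Only mapScript is mandatory; when initScript or combineScript is absent, the factory receives the (p) -> null function seen above.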
use of org.elasticsearch.index.query.QueryShardContext in project elasticsearch by elastic.
The class PhraseSuggester, method innerExecute:
/*
 * More ideas:
 * - add the ability to find whitespace problems -> we can build a poor man's decompounder with our index based on an automaton?
 * - add the ability to build different error models, maybe based on a confusion matrix?
 * - try to combine a token with its subsequent token to find / detect word splits (optional)
 *   - for this to work we need some way to define the position length of a candidate
 * - phonetic filters could be interesting here too, for candidate selection
 */
@Override
public Suggestion<? extends Entry<? extends Option>> innerExecute(String name, PhraseSuggestionContext suggestion,
        IndexSearcher searcher, CharsRefBuilder spare) throws IOException {
    double realWordErrorLikelihood = suggestion.realworldErrorLikelyhood();
    final PhraseSuggestion response = new PhraseSuggestion(name, suggestion.getSize());
    final IndexReader indexReader = searcher.getIndexReader();
    List<PhraseSuggestionContext.DirectCandidateGenerator> generators = suggestion.generators();
    final int numGenerators = generators.size();
    final List<CandidateGenerator> gens = new ArrayList<>(generators.size());
    // Build one candidate generator per configured field that actually has terms.
    for (int i = 0; i < numGenerators; i++) {
        PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i);
        DirectSpellChecker directSpellChecker = generator.createDirectSpellChecker();
        Terms terms = MultiFields.getTerms(indexReader, generator.field());
        if (terms != null) {
            gens.add(new DirectCandidateGenerator(directSpellChecker, generator.field(), generator.suggestMode(),
                indexReader, realWordErrorLikelihood, generator.size(), generator.preFilter(), generator.postFilter(), terms));
        }
    }
    final String suggestField = suggestion.getField();
    final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField);
    if (gens.size() > 0 && suggestTerms != null) {
        final NoisyChannelSpellChecker checker =
            new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit());
        final BytesRef separator = suggestion.separator();
        WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood, separator);
        Result checkerResult;
        try (TokenStream stream = checker.tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField())) {
            checkerResult = checker.getCorrections(stream,
                new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), gens.toArray(new CandidateGenerator[gens.size()])),
                suggestion.maxErrors(), suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize());
        }
        PhraseSuggestion.Entry resultEntry = buildResultEntry(suggestion, spare, checkerResult.cutoffScore);
        response.addTerm(resultEntry);
        final BytesRefBuilder byteSpare = new BytesRefBuilder();
        final Function<Map<String, Object>, ExecutableScript> collateScript = suggestion.getCollateQueryScript();
        final boolean collatePrune = (collateScript != null) && suggestion.collatePrune();
        for (int i = 0; i < checkerResult.corrections.length; i++) {
            Correction correction = checkerResult.corrections[i];
            spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, null, null));
            boolean collateMatch = true;
            if (collateScript != null) {
                // Check whether the collate template query yields any documents
                // from the index for this correction; collateMatch is updated accordingly.
                final Map<String, Object> vars = suggestion.getCollateScriptParams();
                vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString());
                QueryShardContext shardContext = suggestion.getShardContext();
                final ExecutableScript executable = collateScript.apply(vars);
                final BytesReference querySource = (BytesReference) executable.run();
                try (XContentParser parser = XContentFactory.xContent(querySource)
                        .createParser(shardContext.getXContentRegistry(), querySource)) {
                    QueryBuilder innerQueryBuilder = shardContext.newParseContext(parser).parseInnerQueryBuilder();
                    final ParsedQuery parsedQuery = shardContext.toQuery(innerQueryBuilder);
                    collateMatch = Lucene.exists(searcher, parsedQuery.query());
                }
            }
            if (!collateMatch && !collatePrune) {
                continue;
            }
            Text phrase = new Text(spare.toString());
            Text highlighted = null;
            if (suggestion.getPreTag() != null) {
                spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()));
                highlighted = new Text(spare.toString());
            }
            if (collatePrune) {
                resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch));
            } else {
                resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score)));
            }
        }
    } else {
        response.addTerm(buildResultEntry(suggestion, spare, Double.MIN_VALUE));
    }
    return response;
}
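The collateScript branch above implements the suggester's collate option: each correction is rendered into a template query and checked for matches with Lucene.exists(). A hedged sketch of a request that exercises it, assuming the 5.x suggest builders (the field name, input text, and match query are illustrative):

SuggestBuilder suggest = new SuggestBuilder().addSuggestion("did_you_mean",
    SuggestBuilders.phraseSuggestion("title")
        .text("noble prize")
        // {{suggestion}} is the template variable bound via SUGGESTION_TEMPLATE_VAR_NAME above
        .collateQuery("{\"match\": {\"title\": \"{{suggestion}}\"}}")
        // keep non-matching corrections but flag them through collateMatch
        .collatePrune(true));

With collatePrune enabled, every Option carries its collateMatch flag instead of being dropped, matching the two addOption branches above.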
use of org.elasticsearch.index.query.QueryShardContext in project elasticsearch by elastic.
The class SearchSlowLogTests, method createSearchContext:
@Override
protected SearchContext createSearchContext(IndexService indexService) {
    BigArrays bigArrays = indexService.getBigArrays();
    ThreadPool threadPool = indexService.getThreadPool();
    return new TestSearchContext(threadPool, bigArrays, indexService) {
        // A minimal ShardSearchRequest stub: only shardId() and the source
        // getter/setter carry state; every other method returns a default.
        final ShardSearchRequest request = new ShardSearchRequest() {
            private SearchSourceBuilder searchSourceBuilder;

            @Override
            public ShardId shardId() {
                return new ShardId(indexService.index(), 0);
            }

            @Override
            public String[] types() {
                return new String[0];
            }

            @Override
            public SearchSourceBuilder source() {
                return searchSourceBuilder;
            }

            @Override
            public void source(SearchSourceBuilder source) {
                searchSourceBuilder = source;
            }

            @Override
            public int numberOfShards() {
                return 0;
            }

            @Override
            public SearchType searchType() {
                return null;
            }

            @Override
            public QueryBuilder filteringAliases() {
                return null;
            }

            @Override
            public float indexBoost() {
                return 1.0f;
            }

            @Override
            public long nowInMillis() {
                return 0;
            }

            @Override
            public Boolean requestCache() {
                return null;
            }

            @Override
            public Scroll scroll() {
                return null;
            }

            @Override
            public void setProfile(boolean profile) {
            }

            @Override
            public boolean isProfile() {
                return false;
            }

            @Override
            public BytesReference cacheKey() throws IOException {
                return null;
            }

            @Override
            public void rewrite(QueryShardContext context) throws IOException {
            }
        };

        @Override
        public ShardSearchRequest request() {
            return request;
        }
    };
}
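A sketch of how a test might drive this stub, assuming the SlowLogSearchContextPrinter that SearchSlowLogTests uses elsewhere (the query and the assertion are illustrative):

SearchContext searchContext = createSearchContext(indexService);
searchContext.request().source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()));
SearchSlowLog.SlowLogSearchContextPrinter p = new SearchSlowLog.SlowLogSearchContextPrinter(searchContext, 10);
// the printer should render the stubbed shard id and the source set above
assertThat(p.toString(), containsString("source["));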
use of org.elasticsearch.index.query.QueryShardContext in project elasticsearch by elastic.
The class SliceBuilderTests, method testToFilter:
public void testToFilter() throws IOException {
    Directory dir = new RAMDirectory();
    try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
        writer.commit();
    }
    QueryShardContext context = mock(QueryShardContext.class);
    // Case 1: slicing on _uid, which has no doc values and resolves to a TermsSliceQuery.
    try (IndexReader reader = DirectoryReader.open(dir)) {
        MappedFieldType fieldType = new MappedFieldType() {
            @Override
            public MappedFieldType clone() {
                return null;
            }

            @Override
            public String typeName() {
                return null;
            }

            @Override
            public Query termQuery(Object value, @Nullable QueryShardContext context) {
                return null;
            }
        };
        fieldType.setName(UidFieldMapper.NAME);
        fieldType.setHasDocValues(false);
        when(context.fieldMapper(UidFieldMapper.NAME)).thenReturn(fieldType);
        when(context.getIndexReader()).thenReturn(reader);
        SliceBuilder builder = new SliceBuilder(5, 10);
        Query query = builder.toFilter(context, 0, 1);
        assertThat(query, instanceOf(TermsSliceQuery.class));
        assertThat(builder.toFilter(context, 0, 1), equalTo(query));
        try (IndexReader newReader = DirectoryReader.open(dir)) {
            when(context.getIndexReader()).thenReturn(newReader);
            assertThat(builder.toFilter(context, 0, 1), equalTo(query));
        }
    }
    // Case 2: slicing on a numeric doc-values field, which resolves to a DocValuesSliceQuery.
    try (IndexReader reader = DirectoryReader.open(dir)) {
        MappedFieldType fieldType = new MappedFieldType() {
            @Override
            public MappedFieldType clone() {
                return null;
            }

            @Override
            public String typeName() {
                return null;
            }

            @Override
            public Query termQuery(Object value, @Nullable QueryShardContext context) {
                return null;
            }
        };
        fieldType.setName("field_doc_values");
        fieldType.setHasDocValues(true);
        fieldType.setDocValuesType(DocValuesType.SORTED_NUMERIC);
        when(context.fieldMapper("field_doc_values")).thenReturn(fieldType);
        when(context.getIndexReader()).thenReturn(reader);
        IndexNumericFieldData fd = mock(IndexNumericFieldData.class);
        when(context.getForField(fieldType)).thenReturn(fd);
        SliceBuilder builder = new SliceBuilder("field_doc_values", 5, 10);
        Query query = builder.toFilter(context, 0, 1);
        assertThat(query, instanceOf(DocValuesSliceQuery.class));
        assertThat(builder.toFilter(context, 0, 1), equalTo(query));
        try (IndexReader newReader = DirectoryReader.open(dir)) {
            when(context.getIndexReader()).thenReturn(newReader);
            assertThat(builder.toFilter(context, 0, 1), equalTo(query));
        }
        // numSlices > numShards: count the slice queries that can match on each
        // shard; the total must equal numSlices.
        int numSlices = randomIntBetween(10, 100);
        int numShards = randomIntBetween(1, 9);
        Map<Integer, AtomicInteger> numSliceMap = new HashMap<>();
        for (int i = 0; i < numSlices; i++) {
            for (int j = 0; j < numShards; j++) {
                SliceBuilder slice = new SliceBuilder("_uid", i, numSlices);
                Query q = slice.toFilter(context, j, numShards);
                if (q instanceof TermsSliceQuery || q instanceof MatchAllDocsQuery) {
                    AtomicInteger count = numSliceMap.get(j);
                    if (count == null) {
                        count = new AtomicInteger(0);
                        numSliceMap.put(j, count);
                    }
                    count.incrementAndGet();
                    if (q instanceof MatchAllDocsQuery) {
                        assertThat(count.get(), equalTo(1));
                    }
                } else {
                    assertThat(q, instanceOf(MatchNoDocsQuery.class));
                }
            }
        }
        int total = 0;
        for (Map.Entry<Integer, AtomicInteger> e : numSliceMap.entrySet()) {
            total += e.getValue().get();
        }
        assertThat(total, equalTo(numSlices));
        // numShards > numSlices: the matching queries must cover every shard exactly once.
        numShards = randomIntBetween(4, 100);
        numSlices = randomIntBetween(2, numShards - 1);
        List<Integer> targetShards = new ArrayList<>();
        for (int i = 0; i < numSlices; i++) {
            for (int j = 0; j < numShards; j++) {
                SliceBuilder slice = new SliceBuilder("_uid", i, numSlices);
                Query q = slice.toFilter(context, j, numShards);
                if (q instanceof MatchNoDocsQuery == false) {
                    assertThat(q, instanceOf(MatchAllDocsQuery.class));
                    targetShards.add(j);
                }
            }
        }
        assertThat(targetShards.size(), equalTo(numShards));
        assertThat(new HashSet<>(targetShards).size(), equalTo(numShards));
        // numShards == numSlices: slice i matches shard i and nothing else.
        numShards = randomIntBetween(2, 10);
        numSlices = numShards;
        for (int i = 0; i < numSlices; i++) {
            for (int j = 0; j < numShards; j++) {
                SliceBuilder slice = new SliceBuilder("_uid", i, numSlices);
                Query q = slice.toFilter(context, j, numShards);
                if (i == j) {
                    assertThat(q, instanceOf(MatchAllDocsQuery.class));
                } else {
                    assertThat(q, instanceOf(MatchNoDocsQuery.class));
                }
            }
        }
    }
    // Case 3: a field with neither _uid semantics nor doc values cannot be sliced on.
    try (IndexReader reader = DirectoryReader.open(dir)) {
        MappedFieldType fieldType = new MappedFieldType() {
            @Override
            public MappedFieldType clone() {
                return null;
            }

            @Override
            public String typeName() {
                return null;
            }

            @Override
            public Query termQuery(Object value, @Nullable QueryShardContext context) {
                return null;
            }
        };
        fieldType.setName("field_without_doc_values");
        when(context.fieldMapper("field_without_doc_values")).thenReturn(fieldType);
        when(context.getIndexReader()).thenReturn(reader);
        SliceBuilder builder = new SliceBuilder("field_without_doc_values", 5, 10);
        IllegalArgumentException exc =
            expectThrows(IllegalArgumentException.class, () -> builder.toFilter(context, 0, 1));
        assertThat(exc.getMessage(), containsString("cannot load numeric doc values"));
    }
}
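For context, toFilter() backs the sliced-scroll feature that this test covers. A minimal client-side sketch, assuming a connected transport Client named client (the index name and slice parameters are illustrative):

SearchResponse response = client.prepareSearch("my-index")
    .setScroll(TimeValue.timeValueMinutes(1))
    .setQuery(QueryBuilders.matchAllQuery())
    // slice 0 of 2: together with slice 1 this covers every document exactly once
    .slice(new SliceBuilder("_uid", 0, 2))
    .get();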
use of org.elasticsearch.index.query.QueryShardContext in project elasticsearch by elastic.
The class AbstractSortTestCase, method createMockShardContext:
protected QueryShardContext createMockShardContext() {
    Index index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index,
        Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
    IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null);
    IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), cache, null, null);
    BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() {
        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
        }

        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
        }
    });
    long nowInMillis = randomNonNegativeLong();
    return new QueryShardContext(0, idxSettings, bitsetFilterCache, ifds, null, null, scriptService,
            xContentRegistry(), null, null, () -> nowInMillis) {
        // Field lookups are delegated to the test subclass so each sort test
        // can provide exactly the field type it needs.
        @Override
        public MappedFieldType fieldMapper(String name) {
            return provideMappedFieldType(name);
        }

        @Override
        public ObjectMapper getObjectMapper(String name) {
            BuilderContext context = new BuilderContext(this.getIndexSettings().getSettings(), new ContentPath());
            return new ObjectMapper.Builder<>(name).nested(Nested.newNested(false, false)).build(context);
        }
    };
}
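A sketch of how a concrete sort test might use this helper, assuming provideMappedFieldType(name) is overridden to resolve the field (the field name and sort order are illustrative; build() declares throws IOException):

QueryShardContext context = createMockShardContext();
FieldSortBuilder sortBuilder = new FieldSortBuilder("price").order(SortOrder.DESC);
// build() resolves "price" through fieldMapper(), i.e. provideMappedFieldType("price")
SortFieldAndFormat sortField = sortBuilder.build(context);

The overridden getObjectMapper lets sorts with nested paths be built without real mappings.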