Use of org.elasticsearch.index.query.QueryBuilder in project elasticsearch by elastic.
From the class FunctionScoreQueryBuilder, method fromXContent:
public static FunctionScoreQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
    XContentParser parser = parseContext.parser();
    QueryBuilder query = null;
    float boost = AbstractQueryBuilder.DEFAULT_BOOST;
    String queryName = null;
    FiltersFunctionScoreQuery.ScoreMode scoreMode = FunctionScoreQueryBuilder.DEFAULT_SCORE_MODE;
    float maxBoost = FunctionScoreQuery.DEFAULT_MAX_BOOST;
    Float minScore = null;
    String currentFieldName = null;
    XContentParser.Token token;
    CombineFunction combineFunction = null;
    // Either define array of functions and filters or only one function
    boolean functionArrayFound = false;
    boolean singleFunctionFound = false;
    String singleFunctionName = null;
    List<FunctionScoreQueryBuilder.FilterFunctionBuilder> filterFunctionBuilders = new ArrayList<>();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
            if (QUERY_FIELD.match(currentFieldName)) {
                if (query != null) {
                    throw new ParsingException(parser.getTokenLocation(),
                        "failed to parse [{}] query. [query] is already defined.", NAME);
                }
                query = parseContext.parseInnerQueryBuilder();
            } else {
                if (singleFunctionFound) {
                    throw new ParsingException(parser.getTokenLocation(),
                        "failed to parse [{}] query. already found function [{}], now encountering [{}]. use [functions] "
                            + "array if you want to define several functions.",
                        NAME, singleFunctionName, currentFieldName);
                }
                if (functionArrayFound) {
                    String errorString = "already found [functions] array, now encountering [" + currentFieldName + "].";
                    handleMisplacedFunctionsDeclaration(parser.getTokenLocation(), errorString);
                }
                singleFunctionFound = true;
                singleFunctionName = currentFieldName;
                ScoreFunctionBuilder<?> scoreFunction = parser.namedObject(ScoreFunctionBuilder.class, currentFieldName, parseContext);
                filterFunctionBuilders.add(new FunctionScoreQueryBuilder.FilterFunctionBuilder(scoreFunction));
            }
        } else if (token == XContentParser.Token.START_ARRAY) {
            if (FUNCTIONS_FIELD.match(currentFieldName)) {
                if (singleFunctionFound) {
                    String errorString = "already found [" + singleFunctionName + "], now encountering [functions].";
                    handleMisplacedFunctionsDeclaration(parser.getTokenLocation(), errorString);
                }
                functionArrayFound = true;
                currentFieldName = parseFiltersAndFunctions(parseContext, filterFunctionBuilders);
            } else {
                throw new ParsingException(parser.getTokenLocation(),
                    "failed to parse [{}] query. array [{}] is not supported", NAME, currentFieldName);
            }
        } else if (token.isValue()) {
            if (SCORE_MODE_FIELD.match(currentFieldName)) {
                scoreMode = FiltersFunctionScoreQuery.ScoreMode.fromString(parser.text());
            } else if (BOOST_MODE_FIELD.match(currentFieldName)) {
                combineFunction = CombineFunction.fromString(parser.text());
            } else if (MAX_BOOST_FIELD.match(currentFieldName)) {
                maxBoost = parser.floatValue();
            } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
                boost = parser.floatValue();
            } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
                queryName = parser.text();
            } else if (MIN_SCORE_FIELD.match(currentFieldName)) {
                minScore = parser.floatValue();
            } else {
                if (singleFunctionFound) {
                    throw new ParsingException(parser.getTokenLocation(),
                        "failed to parse [{}] query. already found function [{}], now encountering [{}]. use [functions] array "
                            + "if you want to define several functions.",
                        NAME, singleFunctionName, currentFieldName);
                }
                if (functionArrayFound) {
                    String errorString = "already found [functions] array, now encountering [" + currentFieldName + "].";
                    handleMisplacedFunctionsDeclaration(parser.getTokenLocation(), errorString);
                }
                if (WEIGHT_FIELD.match(currentFieldName)) {
                    filterFunctionBuilders.add(
                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(new WeightBuilder().setWeight(parser.floatValue())));
                    singleFunctionFound = true;
                    singleFunctionName = currentFieldName;
                } else {
                    throw new ParsingException(parser.getTokenLocation(),
                        "failed to parse [{}] query. field [{}] is not supported", NAME, currentFieldName);
                }
            }
        }
    }
    if (query == null) {
        query = new MatchAllQueryBuilder();
    }
    FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(query,
        filterFunctionBuilders.toArray(new FunctionScoreQueryBuilder.FilterFunctionBuilder[filterFunctionBuilders.size()]));
    if (combineFunction != null) {
        functionScoreQueryBuilder.boostMode(combineFunction);
    }
    functionScoreQueryBuilder.scoreMode(scoreMode);
    functionScoreQueryBuilder.maxBoost(maxBoost);
    if (minScore != null) {
        functionScoreQueryBuilder.setMinScore(minScore);
    }
    functionScoreQueryBuilder.boost(boost);
    functionScoreQueryBuilder.queryName(queryName);
    return functionScoreQueryBuilder;
}
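For orientation, here is a minimal sketch of how the fields that fromXContent parses (score_mode, boost_mode, max_boost, min_score, and a filtered function) map onto the builder API when the QueryBuilder is constructed programmatically. The field names, weight value, and the class/package locations are assumptions based on the 5.x Java API and are illustrative only.

import org.elasticsearch.common.lucene.search.function.CombineFunction;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;

class FunctionScoreExample {
    // Illustrative only: one filtered weight function plus the score_mode, boost_mode,
    // max_boost and min_score knobs that fromXContent reads from the request body.
    static QueryBuilder boostedBooksQuery() {
        return QueryBuilders.functionScoreQuery(
                QueryBuilders.matchQuery("title", "elasticsearch"),
                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(
                        QueryBuilders.termQuery("category", "books"),
                        ScoreFunctionBuilders.weightFactorFunction(2.0f))
                })
            .scoreMode(FiltersFunctionScoreQuery.ScoreMode.SUM)
            .boostMode(CombineFunction.MULTIPLY)
            .maxBoost(10f)
            .setMinScore(1.0f);
    }
}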
Use of org.elasticsearch.index.query.QueryBuilder in project elasticsearch by elastic.
From the class PhraseSuggester, method innerExecute:
/*
 * More ideas:
 * - add the ability to find whitespace problems -> we can build a poor man's decompounder with our index based on an automaton?
 * - add the ability to build different error models, maybe based on a confusion matrix?
 * - try to combine a token with its subsequent token to find / detect word splits (optional)
 *   - for this to work we need some way to define the position length of a candidate
 * - phonetic filters could be interesting here too for candidate selection
 */
@Override
public Suggestion<? extends Entry<? extends Option>> innerExecute(String name, PhraseSuggestionContext suggestion,
        IndexSearcher searcher, CharsRefBuilder spare) throws IOException {
    double realWordErrorLikelihood = suggestion.realworldErrorLikelyhood();
    final PhraseSuggestion response = new PhraseSuggestion(name, suggestion.getSize());
    final IndexReader indexReader = searcher.getIndexReader();
    List<PhraseSuggestionContext.DirectCandidateGenerator> generators = suggestion.generators();
    final int numGenerators = generators.size();
    final List<CandidateGenerator> gens = new ArrayList<>(generators.size());
    for (int i = 0; i < numGenerators; i++) {
        PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i);
        DirectSpellChecker directSpellChecker = generator.createDirectSpellChecker();
        Terms terms = MultiFields.getTerms(indexReader, generator.field());
        if (terms != null) {
            gens.add(new DirectCandidateGenerator(directSpellChecker, generator.field(), generator.suggestMode(),
                indexReader, realWordErrorLikelihood, generator.size(), generator.preFilter(), generator.postFilter(), terms));
        }
    }
    final String suggestField = suggestion.getField();
    final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField);
    if (gens.size() > 0 && suggestTerms != null) {
        final NoisyChannelSpellChecker checker =
            new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit());
        final BytesRef separator = suggestion.separator();
        WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood, separator);
        Result checkerResult;
        try (TokenStream stream = checker.tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField())) {
            checkerResult = checker.getCorrections(stream,
                new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), gens.toArray(new CandidateGenerator[gens.size()])),
                suggestion.maxErrors(), suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize());
        }
        PhraseSuggestion.Entry resultEntry = buildResultEntry(suggestion, spare, checkerResult.cutoffScore);
        response.addTerm(resultEntry);
        final BytesRefBuilder byteSpare = new BytesRefBuilder();
        final Function<Map<String, Object>, ExecutableScript> collateScript = suggestion.getCollateQueryScript();
        final boolean collatePrune = (collateScript != null) && suggestion.collatePrune();
        for (int i = 0; i < checkerResult.corrections.length; i++) {
            Correction correction = checkerResult.corrections[i];
            spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, null, null));
            boolean collateMatch = true;
            if (collateScript != null) {
                // Checks if the template query collateScript yields any documents
                // from the index for a correction, collateMatch is updated
                final Map<String, Object> vars = suggestion.getCollateScriptParams();
                vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString());
                QueryShardContext shardContext = suggestion.getShardContext();
                final ExecutableScript executable = collateScript.apply(vars);
                final BytesReference querySource = (BytesReference) executable.run();
                try (XContentParser parser = XContentFactory.xContent(querySource)
                        .createParser(shardContext.getXContentRegistry(), querySource)) {
                    QueryBuilder innerQueryBuilder = shardContext.newParseContext(parser).parseInnerQueryBuilder();
                    final ParsedQuery parsedQuery = shardContext.toQuery(innerQueryBuilder);
                    collateMatch = Lucene.exists(searcher, parsedQuery.query());
                }
            }
            if (!collateMatch && !collatePrune) {
                continue;
            }
            Text phrase = new Text(spare.toString());
            Text highlighted = null;
            if (suggestion.getPreTag() != null) {
                spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()));
                highlighted = new Text(spare.toString());
            }
            if (collatePrune) {
                resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch));
            } else {
                resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score)));
            }
        }
    } else {
        response.addTerm(buildResultEntry(suggestion, spare, Double.MIN_VALUE));
    }
    return response;
}
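The QueryBuilder shows up in the collate step above: the rendered collate template is parsed back into a QueryBuilder and executed with Lucene.exists to decide whether a correction matches any documents. As a hedged sketch of the client side that drives this path, assuming the 5.x suggest builder API, a collated phrase suggestion request might look as follows; the field name, suggestion name, and template body are placeholders.

import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;

class CollatedPhraseSuggestionExample {
    // Illustrative only: each correction returned by the suggester is substituted into
    // {{suggestion}}, and the rendered query is what innerExecute parses into a QueryBuilder
    // and checks with Lucene.exists.
    static SearchSourceBuilder didYouMeanRequest() {
        PhraseSuggestionBuilder phrase = SuggestBuilders.phraseSuggestion("title")
            .text("noble prize")
            .collateQuery("{\"match_phrase\": {\"title\": \"{{suggestion}}\"}}")
            .collatePrune(true);
        return new SearchSourceBuilder()
            .size(0)
            .suggest(new SuggestBuilder().addSuggestion("did_you_mean", phrase));
    }
}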
Use of org.elasticsearch.index.query.QueryBuilder in project elasticsearch by elastic.
From the class SearchSlowLogTests, method createSearchContext:
@Override
protected SearchContext createSearchContext(IndexService indexService) {
    BigArrays bigArrays = indexService.getBigArrays();
    ThreadPool threadPool = indexService.getThreadPool();
    return new TestSearchContext(threadPool, bigArrays, indexService) {
        final ShardSearchRequest request = new ShardSearchRequest() {
            private SearchSourceBuilder searchSourceBuilder;

            @Override
            public ShardId shardId() {
                return new ShardId(indexService.index(), 0);
            }

            @Override
            public String[] types() {
                return new String[0];
            }

            @Override
            public SearchSourceBuilder source() {
                return searchSourceBuilder;
            }

            @Override
            public void source(SearchSourceBuilder source) {
                searchSourceBuilder = source;
            }

            @Override
            public int numberOfShards() {
                return 0;
            }

            @Override
            public SearchType searchType() {
                return null;
            }

            @Override
            public QueryBuilder filteringAliases() {
                return null;
            }

            @Override
            public float indexBoost() {
                return 1.0f;
            }

            @Override
            public long nowInMillis() {
                return 0;
            }

            @Override
            public Boolean requestCache() {
                return null;
            }

            @Override
            public Scroll scroll() {
                return null;
            }

            @Override
            public void setProfile(boolean profile) {
            }

            @Override
            public boolean isProfile() {
                return false;
            }

            @Override
            public BytesReference cacheKey() throws IOException {
                return null;
            }

            @Override
            public void rewrite(QueryShardContext context) throws IOException {
            }
        };

        @Override
        public ShardSearchRequest request() {
            return request;
        }
    };
}
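A hedged usage sketch of the stub above: the slow-log code reads the QueryBuilder through the SearchSourceBuilder stored on the request, so exercising the stub amounts to setting a source and reading it back. The test method name below is hypothetical, and createIndex is assumed to come from the enclosing test case's base class.

// Hypothetical test method, assumed to live in the same test class so that
// createIndex(...) and createSearchContext(...) are available.
public void testSourceCarriesQueryBuilder() {
    IndexService indexService = createIndex("index");
    SearchContext context = createSearchContext(indexService);
    context.request().source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
    // the stub simply stores and returns the source, so the QueryBuilder round-trips unchanged
    assertNotNull(context.request().source().query());
}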
Use of org.elasticsearch.index.query.QueryBuilder in project elasticsearch by elastic.
From the class QueryRescoreBuilderTests, method randomRescoreBuilder:
/**
 * create random shape that is put under test
 */
public static QueryRescorerBuilder randomRescoreBuilder() {
    QueryBuilder queryBuilder = new MatchAllQueryBuilder().boost(randomFloat()).queryName(randomAsciiOfLength(20));
    org.elasticsearch.search.rescore.QueryRescorerBuilder rescorer =
        new org.elasticsearch.search.rescore.QueryRescorerBuilder(queryBuilder);
    if (randomBoolean()) {
        rescorer.setQueryWeight(randomFloat());
    }
    if (randomBoolean()) {
        rescorer.setRescoreQueryWeight(randomFloat());
    }
    if (randomBoolean()) {
        rescorer.setScoreMode(randomFrom(QueryRescoreMode.values()));
    }
    if (randomBoolean()) {
        rescorer.windowSize(randomIntBetween(0, 100));
    }
    return rescorer;
}
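For context, a rescorer built this way is attached to a search request next to a base query. A minimal sketch, assuming the 5.x SearchSourceBuilder.addRescorer API; the field names and weights are illustrative.

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;

class RescoreExample {
    // Illustrative only: rescore the top 50 hits of a cheap match query with a
    // stricter phrase query, blending the two scores.
    static SearchSourceBuilder rescoredSearch() {
        return new SearchSourceBuilder()
            .query(QueryBuilders.matchQuery("body", "quick brown fox"))
            .addRescorer(new QueryRescorerBuilder(QueryBuilders.matchPhraseQuery("body", "quick brown fox"))
                .setQueryWeight(0.7f)
                .setRescoreQueryWeight(1.2f)
                .windowSize(50));
    }
}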
Use of org.elasticsearch.index.query.QueryBuilder in project elasticsearch by elastic.
From the class FiltersTests, method createTestAggregatorBuilder:
@Override
protected FiltersAggregationBuilder createTestAggregatorBuilder() {
    int size = randomIntBetween(1, 20);
    FiltersAggregationBuilder factory;
    if (randomBoolean()) {
        KeyedFilter[] filters = new KeyedFilter[size];
        int i = 0;
        for (String key : randomUnique(() -> randomAsciiOfLengthBetween(1, 20), size)) {
            filters[i++] = new KeyedFilter(key,
                QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
        }
        factory = new FiltersAggregationBuilder(randomAsciiOfLengthBetween(1, 20), filters);
    } else {
        QueryBuilder[] filters = new QueryBuilder[size];
        for (int i = 0; i < size; i++) {
            filters[i] = QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20));
        }
        factory = new FiltersAggregationBuilder(randomAsciiOfLengthBetween(1, 20), filters);
    }
    if (randomBoolean()) {
        factory.otherBucket(randomBoolean());
    }
    if (randomBoolean()) {
        factory.otherBucketKey(randomAsciiOfLengthBetween(1, 20));
    }
    return factory;
}
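The two branches above exercise the keyed and anonymous constructors of FiltersAggregationBuilder. Written out without the randomization, the keyed form might look like the following sketch; the aggregation name, bucket keys, the status field, and the 5.x package locations are assumptions.

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator.KeyedFilter;
import org.elasticsearch.search.builder.SearchSourceBuilder;

class FiltersAggregationExample {
    // Illustrative only: two named buckets plus an explicit "other" bucket.
    static SearchSourceBuilder statusBreakdown() {
        FiltersAggregationBuilder statusFilters = new FiltersAggregationBuilder("status_filters",
                new KeyedFilter("errors", QueryBuilders.termQuery("status", "error")),
                new KeyedFilter("warnings", QueryBuilders.termQuery("status", "warning")))
            .otherBucket(true)
            .otherBucketKey("other");
        return new SearchSourceBuilder().size(0).aggregation(statusFilters);
    }
}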