Usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project: class PercolateQueryBuilder, method doToQuery.
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    // Touch nowInMillis() so this query is never cached: we cannot prove the
    // percolated queries are free of "now"-relative constructs or scripts.
    context.nowInMillis();

    // Any reference to an indexed document must already have been resolved by rewrite().
    if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) {
        throw new IllegalStateException("query builder must be rewritten first");
    }
    if (document == null) {
        throw new IllegalStateException("no document to percolate");
    }

    // Parse the document to percolate using the mappings of the target document type.
    MapperService mapperService = context.getMapperService();
    DocumentMapperForType mapperForType = mapperService.documentMapperWithAutoCreate(documentType);
    DocumentMapper mapper = mapperForType.getDocumentMapper();
    ParsedDocument parsedDocument = mapper.parse(
            source(context.index().getName(), documentType, "_temp_id", document, documentXContentType));

    FieldNameAnalyzer perFieldAnalyzer = (FieldNameAnalyzer) mapper.mappers().indexAnalyzer();
    // FieldNameAnalyzer is strict about unknown fields, but the percolator sometimes is not
    // when 'index.percolator.map_unmapped_fields_as_string' is enabled, so fall back to the
    // default index analyzer for any field the mapping does not know about.
    Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            Analyzer fieldAnalyzer = perFieldAnalyzer.analyzers().get(fieldName);
            return fieldAnalyzer != null ? fieldAnalyzer : context.getIndexAnalyzers().getDefaultIndexAnalyzer();
        }
    };

    // Index the parsed document into an in-memory searcher that the stored queries run against.
    final IndexSearcher docSearcher;
    if (parsedDocument.docs().size() > 1) {
        // More than one Lucene document implies nested object mappings produced child docs.
        assert mapper.hasNestedObjects();
        docSearcher = createMultiDocumentSearcher(analyzer, parsedDocument);
    } else {
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(parsedDocument.rootDoc(), analyzer, true, false);
        docSearcher = memoryIndex.createSearcher();
        docSearcher.setQueryCache(null);
    }

    Version indexVersionCreated = context.getIndexSettings().getIndexVersionCreated();
    boolean mapUnmappedFieldsAsString = context.getIndexSettings()
            .getValue(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
    // Copy the shard context so an unfrozen version is available for parsing the legacy
    // percolator queries.
    QueryShardContext percolateShardContext = new QueryShardContext(context);

    MappedFieldType fieldType = context.fieldMapper(field);
    if (fieldType == null) {
        throw new QueryShardException(context, "field [" + field + "] does not exist");
    }
    if (fieldType instanceof PercolatorFieldMapper.FieldType == false) {
        throw new QueryShardException(context, "expected field [" + field
                + "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]");
    }
    PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType;
    PercolateQuery.QueryStore queryStore = createStore(pft, percolateShardContext, mapUnmappedFieldsAsString);
    return pft.percolateQuery(documentType, queryStore, document, docSearcher);
}
Usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project: class PercolateQueryBuilderTests, method testCreateMultiDocumentSearcher.
public void testCreateMultiDocumentSearcher() throws Exception {
    // Build a ParsedDocument backed by a random number (>= 2) of empty Lucene documents.
    int docCount = randomIntBetween(2, 8);
    List<ParseContext.Document> luceneDocs = new ArrayList<>(docCount);
    for (int i = 0; i < docCount; i++) {
        luceneDocs.add(new ParseContext.Document());
    }
    Analyzer analyzer = new WhitespaceAnalyzer();
    ParsedDocument parsedDocument = new ParsedDocument(null, null, "_id", "_type", null, luceneDocs, null, null, null);

    IndexSearcher searcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, parsedDocument);
    assertThat(searcher.getIndexReader().numDocs(), equalTo(docCount));

    // The searcher must rewrite any query so that nested docs are never included as hits.
    Query query = new MatchAllDocsQuery();
    BooleanQuery rewritten = (BooleanQuery) searcher.createNormalizedWeight(query, true).getQuery();
    assertThat(rewritten.clauses().size(), equalTo(2));
    assertThat(rewritten.clauses().get(0).getQuery(), sameInstance(query));
    assertThat(rewritten.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
    assertThat(rewritten.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
}
Usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project: class PercolatorFieldMapperTests, method testMultiplePercolatorFields.
// Multiple percolator fields are allowed in the mapping, but only one field can be used at index time.
public void testMultiplePercolatorFields() throws Exception {
    String typeName = "another_type";
    String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(typeName)
            .startObject("_field_names").field("enabled", false).endObject()
            .startObject("properties")
                .startObject("query_field1").field("type", "percolator").endObject()
                .startObject("query_field2").field("type", "percolator").endObject()
            .endObject()
            .endObject().endObject().string();
    mapperService.merge(typeName, new CompressedXContent(percolatorMapper),
            MapperService.MergeReason.MAPPING_UPDATE, true);

    QueryBuilder queryBuilder = matchQuery("field", "value");
    ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
            jsonBuilder().startObject()
                    .field("query_field1", queryBuilder)
                    .field("query_field2", queryBuilder)
                    .endObject().bytes());
    // The count also includes all other meta fields, not just the two percolator fields.
    assertThat(doc.rootDoc().getFields().size(), equalTo(14));

    // Both percolator fields should carry the serialized query.
    BytesRef queryBuilderAsBytes = doc.rootDoc().getField("query_field1.query_builder_field").binaryValue();
    assertQueryBuilder(queryBuilderAsBytes, queryBuilder);
    queryBuilderAsBytes = doc.rootDoc().getField("query_field2.query_builder_field").binaryValue();
    assertQueryBuilder(queryBuilderAsBytes, queryBuilder);
}
Usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project: class PercolatorFieldMapperTests, method testPercolatorFieldMapper.
public void testPercolatorFieldMapper() throws Exception {
    addQueryMapping();

    // A term query yields extractable terms, so extraction is expected to complete.
    QueryBuilder query = termQuery("field", "value");
    ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
            XContentFactory.jsonBuilder().startObject().field(fieldName, query).endObject().bytes());
    assertThat(doc.rootDoc().getFields(fieldType.queryTermsField.name()).length, equalTo(1));
    assertThat(doc.rootDoc().getFields(fieldType.queryTermsField.name())[0].binaryValue().utf8ToString(),
            equalTo("field\0value"));
    assertThat(doc.rootDoc().getFields(fieldType.queryBuilderField.name()).length, equalTo(1));
    assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name()).length, equalTo(1));
    assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name())[0].stringValue(),
            equalTo(EXTRACTION_COMPLETE));
    BytesRef serializedQuery = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
    assertQueryBuilder(serializedQuery, query);

    // Now index a query from which no terms can be extracted: no query terms should be
    // stored and the extraction result should record the failure.
    query = rangeQuery("field").from("a").to("z");
    doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
            XContentFactory.jsonBuilder().startObject().field(fieldName, query).endObject().bytes());
    assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name()).length, equalTo(1));
    assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name())[0].stringValue(),
            equalTo(EXTRACTION_FAILED));
    assertThat(doc.rootDoc().getFields(fieldType.queryTermsField.name()).length, equalTo(0));
    assertThat(doc.rootDoc().getFields(fieldType.queryBuilderField.name()).length, equalTo(1));
    serializedQuery = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
    assertQueryBuilder(serializedQuery, query);
}
Usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project: class PercolatorFieldMapperTests, method testStoringQueries.
public void testStoringQueries() throws Exception {
    addQueryMapping();

    // A representative mix of query types; each must round-trip through the
    // percolator field's stored query-builder representation.
    QueryBuilder[] candidates = new QueryBuilder[] {
            termQuery("field", "value"),
            matchAllQuery(),
            matchQuery("field", "value"),
            matchPhraseQuery("field", "value"),
            prefixQuery("field", "v"),
            wildcardQuery("field", "v*"),
            rangeQuery("number_field").gte(0).lte(9),
            rangeQuery("date_field").from("2015-01-01T00:00").to("2015-01-01T00:00")
    };
    for (QueryBuilder candidate : candidates) {
        ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
                XContentFactory.jsonBuilder().startObject().field(fieldName, candidate).endObject().bytes());
        BytesRef serializedQuery = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
        assertQueryBuilder(serializedQuery, candidate);
    }
}
Aggregations