Use of org.elasticsearch.index.mapper.DocumentMapperForType in project elasticsearch by elastic.
From the class TermVectorsService, method parseDocument.
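This helper parses an ad-hoc document for the term vectors API, auto-creating a document mapper for the given type when none exists yet: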
private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type,
                                            BytesReference doc, XContentType xContentType) {
    MapperService mapperService = indexShard.mapperService();
    DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
    ParsedDocument parsedDocument =
        docMapper.getDocumentMapper().parse(source(index, type, "_id_for_tv_api", doc, xContentType));
    if (docMapper.getMapping() != null) {
        // A non-null mapping means the mapper was created on the fly; carry the
        // dynamic mapping update along with the parsed document.
        parsedDocument.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    return parsedDocument;
}
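The same pattern can be lifted into a reusable helper. Below is a minimal sketch assuming the Elasticsearch 5.x internal APIs used above; the class AutoCreateParseExample, the method parseWithAutoCreate, and the placeholder id "temp-id" are hypothetical names, not part of the Elasticsearch source.

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;

import static org.elasticsearch.index.mapper.SourceToParse.source;

class AutoCreateParseExample {

    // Parse a source document, creating the document mapper for the type on
    // the fly when none exists yet, and attach any resulting mapping update
    // to the ParsedDocument so the caller can publish it to the cluster state.
    static ParsedDocument parseWithAutoCreate(MapperService mapperService, String index, String type,
                                              BytesReference sourceBytes, XContentType xContentType) {
        DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(type);
        ParsedDocument parsed = docMapperForType.getDocumentMapper()
                .parse(source(index, type, "temp-id", sourceBytes, xContentType));
        // getMapping() is non-null only when a new mapping had to be created.
        if (docMapperForType.getMapping() != null) {
            parsed.addDynamicMappingsUpdate(docMapperForType.getMapping());
        }
        return parsed;
    }
}

In both call sites on this page, documentMapperWithAutoCreate may create a new mapping as a side effect; the non-null getMapping() result is attached to the parsed document as a dynamic mapping update so the caller can apply it.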
Use of org.elasticsearch.index.mapper.DocumentMapperForType in project elasticsearch by elastic.
From the class PercolateQueryBuilder, method doToQuery.
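The percolator parses the document to be percolated into an in-memory index and builds a PercolateQuery that evaluates the stored queries against it: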
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    // Call nowInMillis() so that this query becomes un-cacheable, since we
    // can't be sure that it doesn't use now or scripts.
    context.nowInMillis();
    if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) {
        throw new IllegalStateException("query builder must be rewritten first");
    }
    if (document == null) {
        throw new IllegalStateException("no document to percolate");
    }
    MapperService mapperService = context.getMapperService();
    DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType);
    DocumentMapper docMapper = docMapperForType.getDocumentMapper();
    ParsedDocument doc = docMapper.parse(source(context.index().getName(), documentType, "_temp_id",
        document, documentXContentType));
    FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer();
    // Need this custom impl because FieldNameAnalyzer is strict and the percolator sometimes isn't,
    // e.g. when 'index.percolator.map_unmapped_fields_as_string' is enabled:
    Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            Analyzer analyzer = fieldNameAnalyzer.analyzers().get(fieldName);
            if (analyzer != null) {
                return analyzer;
            } else {
                // Fall back to the default index analyzer for unmapped fields.
                return context.getIndexAnalyzers().getDefaultIndexAnalyzer();
            }
        }
    };
    final IndexSearcher docSearcher;
    if (doc.docs().size() > 1) {
        // More than one Lucene document means the mapping contains nested objects.
        assert docMapper.hasNestedObjects();
        docSearcher = createMultiDocumentSearcher(analyzer, doc);
    } else {
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc.rootDoc(), analyzer, true, false);
        docSearcher = memoryIndex.createSearcher();
        docSearcher.setQueryCache(null);
    }
    Version indexVersionCreated = context.getIndexSettings().getIndexVersionCreated();
    boolean mapUnmappedFieldsAsString = context.getIndexSettings()
        .getValue(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
    // We have to make a copy of the QueryShardContext here so we can have an unfrozen version
    // for parsing the legacy percolator queries.
    QueryShardContext percolateShardContext = new QueryShardContext(context);
    MappedFieldType fieldType = context.fieldMapper(field);
    if (fieldType == null) {
        throw new QueryShardException(context, "field [" + field + "] does not exist");
    }
    if (!(fieldType instanceof PercolatorFieldMapper.FieldType)) {
        throw new QueryShardException(context, "expected field [" + field
            + "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]");
    }
    PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType;
    PercolateQuery.QueryStore queryStore = createStore(pft, percolateShardContext, mapUnmappedFieldsAsString);
    return pft.percolateQuery(documentType, queryStore, document, docSearcher);
}
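For context on the single-document branch above, here is a self-contained sketch of Lucene's MemoryIndex pattern using plain Lucene APIs; the class name, field name, sample text, and query are made up for illustration.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;

class MemoryIndexExample {
    public static void main(String[] args) throws Exception {
        Document doc = new Document();
        doc.add(new TextField("message", "error while parsing request", Field.Store.NO));
        // Index the single document entirely on the heap; the percolator runs
        // its candidate queries against the searcher of such a one-off index.
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc, new StandardAnalyzer());
        IndexSearcher searcher = memoryIndex.createSearcher();
        searcher.setQueryCache(null); // the index lives for one request, so never cache
        int hits = searcher.count(new TermQuery(new Term("message", "error")));
        System.out.println(hits); // prints 1
    }
}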