Search in sources :

Example 6 with FieldMapper

use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.

In class DynamicTemplatesTests, method testSimple:

/**
 * Verifies that dynamic templates are applied when parsing a document:
 * parses a test document against the template mapping, applies the resulting
 * dynamic mapping update, and checks that each generated field was indexed
 * with the expected analyzed/non-analyzed characteristics.
 */
public void testSimple() throws Exception {
    String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json");
    IndexService index = createIndex("test");
    client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping, XContentType.JSON).get();
    DocumentMapper docMapper = index.mapperService().documentMapper("person");
    byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json");
    ParsedDocument parsedDoc = docMapper.parse("test", "person", "1", new BytesArray(json));
    // Apply the dynamic mapping update produced by parsing, then re-resolve the mapper
    // so the dynamically added fields are visible through docMapper.mappers().
    client().admin().indices().preparePutMapping("test").setType("person").setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON).get();
    docMapper = index.mapperService().documentMapper("person");
    Document doc = parsedDoc.rootDoc();
    // Top-level field mapped as a non-tokenized (keyword-like) field.
    assertNotAnalyzedField(doc, docMapper, "name", "some name");
    // "multi*" fields: a tokenized text field plus a non-tokenized ".org" sub-field each.
    assertAnalyzedField(doc, docMapper, "multi1", "multi 1");
    assertNotAnalyzedField(doc, docMapper, "multi1.org", "multi 1");
    assertAnalyzedField(doc, docMapper, "multi2", "multi 2");
    assertNotAnalyzedField(doc, docMapper, "multi2.org", "multi 2");
}

/** Asserts {@code field} was indexed as a tokenized (analyzed) field holding {@code expectedValue}, and has a mapper. */
private void assertAnalyzedField(Document doc, DocumentMapper docMapper, String field, String expectedValue) {
    IndexableField f = doc.getField(field);
    assertThat(f.name(), equalTo(field));
    assertThat(f.stringValue(), equalTo(expectedValue));
    assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
    assertThat(f.fieldType().tokenized(), equalTo(true));
    assertNotNull(docMapper.mappers().getMapper(field));
}

/** Asserts {@code field} was indexed as a non-tokenized field holding {@code expectedValue} as bytes, and has a mapper. */
private void assertNotAnalyzedField(Document doc, DocumentMapper docMapper, String field, String expectedValue) {
    IndexableField f = doc.getField(field);
    assertThat(f.name(), equalTo(field));
    assertThat(f.binaryValue(), equalTo(new BytesRef(expectedValue)));
    assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
    assertThat(f.fieldType().tokenized(), equalTo(false));
    assertNotNull(docMapper.mappers().getMapper(field));
}
Also used : IndexableField(org.apache.lucene.index.IndexableField) BytesArray(org.elasticsearch.common.bytes.BytesArray) ParsedDocument(org.elasticsearch.index.mapper.ParsedDocument) IndexService(org.elasticsearch.index.IndexService) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) Document(org.elasticsearch.index.mapper.ParseContext.Document) ParsedDocument(org.elasticsearch.index.mapper.ParsedDocument) FieldMapper(org.elasticsearch.index.mapper.FieldMapper) BytesRef(org.apache.lucene.util.BytesRef)

Example 7 with FieldMapper

use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.

In class ShardGetService, method innerGetLoadFromStoredFields:

/**
 * Loads a GET result from the shard's stored fields (Lucene stored fields / _source),
 * as opposed to serving it from the translog.
 *
 * @param type               the document type
 * @param id                 the document id
 * @param gFields            explicitly requested stored fields, may be {@code null}
 * @param fetchSourceContext controls whether/which parts of _source are returned
 * @param get                the engine-level result holding the resolved docId/version
 * @param mapperService      used to post-process field values and resolve mappers
 * @return a {@link GetResult} with the (possibly filtered) source and requested fields
 * @throws ElasticsearchException if reading stored fields or re-rendering _source fails
 * @throws IllegalArgumentException if a requested field is an object (non-leaf) field
 */
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
    Map<String, GetField> fields = null;
    BytesReference source = null;
    Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
    // A null visitor means neither _source nor any stored field was requested.
    FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext);
    if (fieldVisitor != null) {
        try {
            docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor);
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
        }
        source = fieldVisitor.source();
        if (!fieldVisitor.fields().isEmpty()) {
            // Convert raw stored values to their mapped representation before returning them.
            fieldVisitor.postProcess(mapperService);
            fields = new HashMap<>(fieldVisitor.fields().size());
            for (Map.Entry<String, List<Object>> entry : fieldVisitor.fields().entrySet()) {
                fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue()));
            }
        }
    }
    DocumentMapper docMapper = mapperService.documentMapper(type);
    if (docMapper.parentFieldMapper().active()) {
        // Expose _parent as a synthetic field when parent/child is in use.
        String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.context.reader(), docIdAndVersion.docId);
        if (fields == null) {
            fields = new HashMap<>(1);
        }
        fields.put(ParentFieldMapper.NAME, new GetField(ParentFieldMapper.NAME, Collections.singletonList(parentId)));
    }
    if (gFields != null && gFields.length > 0) {
        for (String field : gFields) {
            FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
            if (fieldMapper == null) {
                if (docMapper.objectMappers().get(field) != null) {
                    // Only fail if we know it is a object field, missing paths / fields shouldn't fail.
                    throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
                }
            }
        }
    }
    if (!fetchSourceContext.fetchSource()) {
        source = null;
    } else if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
        // TODO: The source might parsed and available in the sourceLookup but that one uses unordered maps so different. Do we care?
        Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source, true);
        XContentType sourceContentType = typeMapTuple.v1();
        // Apply the include/exclude filters and re-render the source in its original content type.
        Map<String, Object> sourceAsMap = XContentMapValues.filter(typeMapTuple.v2(), fetchSourceContext.includes(), fetchSourceContext.excludes());
        try {
            source = XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap).bytes();
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
        }
    }
    return new GetResult(shardId.getIndexName(), type, id, get.version(), get.exists(), source, fields);
}
Also used : BytesReference(org.elasticsearch.common.bytes.BytesReference) FieldsVisitor(org.elasticsearch.index.fieldvisitor.FieldsVisitor) CustomFieldsVisitor(org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) IOException(java.io.IOException) ElasticsearchException(org.elasticsearch.ElasticsearchException) Versions(org.elasticsearch.common.lucene.uid.Versions) XContentType(org.elasticsearch.common.xcontent.XContentType) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) UidFieldMapper(org.elasticsearch.index.mapper.UidFieldMapper) FieldMapper(org.elasticsearch.index.mapper.FieldMapper) ParentFieldMapper(org.elasticsearch.index.mapper.ParentFieldMapper) SourceFieldMapper(org.elasticsearch.index.mapper.SourceFieldMapper) Tuple(org.elasticsearch.common.collect.Tuple)

Example 8 with FieldMapper

use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.

In class FastVectorHighlighter, method highlight:

/**
 * Highlights a field using Lucene's FastVectorHighlighter, which requires the field to be
 * indexed with term vectors including positions and offsets. Per-hit state (rewritten field
 * queries, per-mapper fragment builders, the FVH instance) is memoized in the hit context cache
 * under CACHE_KEY so repeated fields/hits reuse it.
 */
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    SearchContextHighlight.Field field = highlighterContext.field;
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    FieldMapper mapper = highlighterContext.mapper;
    if (canHighlight(mapper) == false) {
        throw new IllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
    }
    Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
    // Lazily create the per-hit cache entry holding queries, builders and the FVH instance.
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
    }
    HighlighterEntry cache = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
    try {
        // Two cached FieldQuery variants: one restricted to the highlighted field
        // (requireFieldMatch) and one matching any field.
        FieldQuery fieldQuery;
        if (field.fieldOptions().requireFieldMatch()) {
            if (cache.fieldMatchFieldQuery == null) {
                /*
                 * We use the top-level reader to rewrite the query against all readers,
                 * so we can cache it across hits (and across readers...).
                 */
                cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
            }
            fieldQuery = cache.fieldMatchFieldQuery;
        } else {
            if (cache.noFieldMatchFieldQuery == null) {
                /*
                 * We use the top-level reader to rewrite the query against all readers,
                 * so we can cache it across hits (and across readers...).
                 */
                cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
            }
            fieldQuery = cache.noFieldMatchFieldQuery;
        }
        MapperHighlightEntry entry = cache.mappers.get(mapper);
        if (entry == null) {
            // First time this mapper is highlighted for this hit: pick the fragment list
            // builder and fragments builder based on the field options.
            FragListBuilder fragListBuilder;
            BaseFragmentsBuilder fragmentsBuilder;
            final BoundaryScanner boundaryScanner = getBoundaryScanner(field);
            boolean forceSource = context.highlight().forceSource(field);
            if (field.fieldOptions().numberOfFragments() == 0) {
                // numberOfFragments == 0 means "highlight the whole field value".
                fragListBuilder = new SingleFragListBuilder();
                // Read the value from stored fields when possible, otherwise fall back to _source.
                if (!forceSource && mapper.fieldType().stored()) {
                    fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
                } else {
                    fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
                }
            } else {
                fragListBuilder = field.fieldOptions().fragmentOffset() == -1 ? new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());
                if (field.fieldOptions().scoreOrdered()) {
                    if (!forceSource && mapper.fieldType().stored()) {
                        fragmentsBuilder = new ScoreOrderFragmentsBuilder(field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
                    } else {
                        fragmentsBuilder = new SourceScoreOrderFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
                    }
                } else {
                    if (!forceSource && mapper.fieldType().stored()) {
                        fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
                    } else {
                        fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
                    }
                }
            }
            fragmentsBuilder.setDiscreteMultiValueHighlighting(termVectorMultiValue);
            entry = new MapperHighlightEntry();
            entry.fragListBuilder = fragListBuilder;
            entry.fragmentsBuilder = fragmentsBuilder;
            if (cache.fvh == null) {
                // parameters to FVH are not requires since:
                // first two booleans are not relevant since they are set on the CustomFieldQuery
                // (phrase and fieldMatch) fragment builders are used explicitly
                cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();
            }
            // NOTE: highlightFilters is a ThreadLocal consumed by CustomFieldQuery during rewrite;
            // it is only set here, on the first encounter of this mapper for the hit.
            CustomFieldQuery.highlightFilters.set(field.fieldOptions().highlightFilter());
            cache.mappers.put(mapper, entry);
        }
        cache.fvh.setPhraseLimit(field.fieldOptions().phraseLimit());
        String[] fragments;
        // a HACK to make highlighter do highlighting, even though its using the single frag list builder
        int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? Integer.MAX_VALUE : field.fieldOptions().numberOfFragments();
        int fragmentCharSize = field.fieldOptions().numberOfFragments() == 0 ? Integer.MAX_VALUE : field.fieldOptions().fragmentCharSize();
        // Only send matched fields if they were requested to save time.
        if (field.fieldOptions().matchedFields() != null && !field.fieldOptions().matchedFields().isEmpty()) {
            fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.fieldType().name(), field.fieldOptions().matchedFields(), fragmentCharSize, numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
        } else {
            fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.fieldType().name(), fragmentCharSize, numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
        }
        if (fragments != null && fragments.length > 0) {
            return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
        }
        // No query match: optionally return a leading excerpt of the field ("no match size").
        int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
        if (noMatchSize > 0) {
            // Essentially we just request that a fragment is built from 0 to noMatchSize using
            // the normal fragmentsBuilder
            FieldFragList fieldFragList = new SimpleFieldFragList(-1);
            fieldFragList.add(0, noMatchSize, Collections.<WeightedPhraseInfo>emptyList());
            fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().name(), fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
            if (fragments != null && fragments.length > 0) {
                return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
            }
        }
        return null;
    } catch (Exception e) {
        throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
    }
}
Also used : SimpleFieldFragList(org.apache.lucene.search.vectorhighlight.SimpleFieldFragList) SingleFragListBuilder(org.apache.lucene.search.vectorhighlight.SingleFragListBuilder) SimpleFragListBuilder(org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder) FragListBuilder(org.apache.lucene.search.vectorhighlight.FragListBuilder) SearchContext(org.elasticsearch.search.internal.SearchContext) BaseFragmentsBuilder(org.apache.lucene.search.vectorhighlight.BaseFragmentsBuilder) ScoreOrderFragmentsBuilder(org.apache.lucene.search.vectorhighlight.ScoreOrderFragmentsBuilder) CustomFieldQuery(org.apache.lucene.search.vectorhighlight.CustomFieldQuery) Encoder(org.apache.lucene.search.highlight.Encoder) FetchSubPhase(org.elasticsearch.search.fetch.FetchSubPhase) CustomFieldQuery(org.apache.lucene.search.vectorhighlight.CustomFieldQuery) FieldQuery(org.apache.lucene.search.vectorhighlight.FieldQuery) Field(org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.Field) FetchPhaseExecutionException(org.elasticsearch.search.fetch.FetchPhaseExecutionException) SimpleFragListBuilder(org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder) FetchPhaseExecutionException(org.elasticsearch.search.fetch.FetchPhaseExecutionException) BoundaryScanner(org.apache.lucene.search.vectorhighlight.BoundaryScanner) BreakIteratorBoundaryScanner(org.apache.lucene.search.vectorhighlight.BreakIteratorBoundaryScanner) SimpleBoundaryScanner(org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner) FieldFragList(org.apache.lucene.search.vectorhighlight.FieldFragList) SimpleFieldFragList(org.apache.lucene.search.vectorhighlight.SimpleFieldFragList) SingleFragListBuilder(org.apache.lucene.search.vectorhighlight.SingleFragListBuilder) FieldMapper(org.elasticsearch.index.mapper.FieldMapper)

Example 9 with FieldMapper

use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.

In class PlainHighlighter, method highlight:

/**
 * Highlights a field by re-analyzing its value(s) with Lucene's plain Highlighter.
 * Per-mapper Highlighter instances are cached in the hit context under CACHE_KEY.
 *
 * @throws IllegalArgumentException   on an unknown fragmenter option
 * @throws FetchPhaseExecutionException if analysis or fragment extraction fails
 *                                      (except when the value exceeds the analyzer's
 *                                      max token length, which returns {@code null})
 */
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    SearchContextHighlight.Field field = highlighterContext.field;
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    FieldMapper mapper = highlighterContext.mapper;
    Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        Map<FieldMapper, org.apache.lucene.search.highlight.Highlighter> mappers = new HashMap<>();
        hitContext.cache().put(CACHE_KEY, mappers);
    }
    @SuppressWarnings("unchecked") Map<FieldMapper, org.apache.lucene.search.highlight.Highlighter> cache = (Map<FieldMapper, org.apache.lucene.search.highlight.Highlighter>) hitContext.cache().get(CACHE_KEY);
    org.apache.lucene.search.highlight.Highlighter entry = cache.get(mapper);
    if (entry == null) {
        // Build and cache a Highlighter configured per the field options.
        QueryScorer queryScorer = new CustomQueryScorer(highlighterContext.query, field.fieldOptions().requireFieldMatch() ? mapper.fieldType().name() : null);
        queryScorer.setExpandMultiTermQuery(true);
        Fragmenter fragmenter;
        if (field.fieldOptions().numberOfFragments() == 0) {
            // numberOfFragments == 0 means "highlight the whole value".
            fragmenter = new NullFragmenter();
        } else if (field.fieldOptions().fragmenter() == null) {
            fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize());
        } else if ("simple".equals(field.fieldOptions().fragmenter())) {
            fragmenter = new SimpleFragmenter(field.fieldOptions().fragmentCharSize());
        } else if ("span".equals(field.fieldOptions().fragmenter())) {
            fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize());
        } else {
            throw new IllegalArgumentException("unknown fragmenter option [" + field.fieldOptions().fragmenter() + "] for the field [" + highlighterContext.fieldName + "]");
        }
        Formatter formatter = new SimpleHTMLFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0]);
        entry = new org.apache.lucene.search.highlight.Highlighter(formatter, encoder, queryScorer);
        entry.setTextFragmenter(fragmenter);
        // always highlight across all data
        entry.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
        cache.put(mapper, entry);
    }
    // a HACK to make highlighter do highlighting, even though its using the single frag list builder
    int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 1 : field.fieldOptions().numberOfFragments();
    ArrayList<TextFragment> fragsList = new ArrayList<>();
    List<Object> textsToHighlight;
    Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
    try {
        textsToHighlight = HighlightUtils.loadFieldValues(field, mapper, context, hitContext);
        for (Object textToHighlight : textsToHighlight) {
            String text;
            if (textToHighlight instanceof BytesRef) {
                text = mapper.fieldType().valueForDisplay(textToHighlight).toString();
            } else {
                text = textToHighlight.toString();
            }
            try (TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().name(), text)) {
                if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
                    // can't perform highlighting if the stream has no terms (binary token stream) or no offsets
                    continue;
                }
                TextFragment[] bestTextFragments = entry.getBestTextFragments(tokenStream, text, false, numberOfFragments);
                for (TextFragment bestTextFragment : bestTextFragments) {
                    if (bestTextFragment != null && bestTextFragment.getScore() > 0) {
                        fragsList.add(bestTextFragment);
                    }
                }
            }
        }
    } catch (Exception e) {
        if (ExceptionsHelper.unwrap(e, BytesRefHash.MaxBytesLengthExceededException.class) != null) {
            // the plain highlighter will parse the source and try to analyze it.
            return null;
        } else {
            throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
        }
    }
    if (field.fieldOptions().scoreOrdered()) {
        CollectionUtil.introSort(fragsList, new Comparator<TextFragment>() {

            @Override
            public int compare(TextFragment o1, TextFragment o2) {
                // Float.compare gives a correct, transitive descending-score order.
                // The previous Math.round(o2.getScore() - o1.getScore()) treated scores
                // differing by less than 0.5 as equal and could break the Comparator
                // contract (non-transitive ordering).
                return Float.compare(o2.getScore(), o1.getScore());
            }
        });
    }
    String[] fragments;
    // number_of_fragments is set to 0 but we have a multivalued field
    if (field.fieldOptions().numberOfFragments() == 0 && textsToHighlight.size() > 1 && fragsList.size() > 0) {
        fragments = new String[fragsList.size()];
        for (int i = 0; i < fragsList.size(); i++) {
            fragments[i] = fragsList.get(i).toString();
        }
    } else {
        // refine numberOfFragments if needed
        numberOfFragments = fragsList.size() < numberOfFragments ? fragsList.size() : numberOfFragments;
        fragments = new String[numberOfFragments];
        for (int i = 0; i < fragments.length; i++) {
            fragments[i] = fragsList.get(i).toString();
        }
    }
    if (fragments.length > 0) {
        return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
    }
    // No query match: optionally return a leading excerpt split on a term boundary.
    int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
    if (noMatchSize > 0 && textsToHighlight.size() > 0) {
        // Pull an excerpt from the beginning of the string but make sure to split the string on a term boundary.
        String fieldContents = textsToHighlight.get(0).toString();
        int end;
        try {
            end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, mapper.fieldType().name(), fieldContents);
        } catch (Exception e) {
            throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
        }
        if (end > 0) {
            return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
        }
    }
    return null;
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) HashMap(java.util.HashMap) Formatter(org.apache.lucene.search.highlight.Formatter) SimpleHTMLFormatter(org.apache.lucene.search.highlight.SimpleHTMLFormatter) ArrayList(java.util.ArrayList) SearchContext(org.elasticsearch.search.internal.SearchContext) TextFragment(org.apache.lucene.search.highlight.TextFragment) Analyzer(org.apache.lucene.analysis.Analyzer) SimpleFragmenter(org.apache.lucene.search.highlight.SimpleFragmenter) Encoder(org.apache.lucene.search.highlight.Encoder) SimpleFragmenter(org.apache.lucene.search.highlight.SimpleFragmenter) Fragmenter(org.apache.lucene.search.highlight.Fragmenter) SimpleSpanFragmenter(org.apache.lucene.search.highlight.SimpleSpanFragmenter) NullFragmenter(org.apache.lucene.search.highlight.NullFragmenter) FetchSubPhase(org.elasticsearch.search.fetch.FetchSubPhase) BytesRefHash(org.apache.lucene.util.BytesRefHash) BytesRef(org.apache.lucene.util.BytesRef) SimpleSpanFragmenter(org.apache.lucene.search.highlight.SimpleSpanFragmenter) QueryScorer(org.apache.lucene.search.highlight.QueryScorer) Text(org.elasticsearch.common.text.Text) NullFragmenter(org.apache.lucene.search.highlight.NullFragmenter) FetchPhaseExecutionException(org.elasticsearch.search.fetch.FetchPhaseExecutionException) IOException(java.io.IOException) FetchPhaseExecutionException(org.elasticsearch.search.fetch.FetchPhaseExecutionException) SimpleHTMLFormatter(org.apache.lucene.search.highlight.SimpleHTMLFormatter) FieldMapper(org.elasticsearch.index.mapper.FieldMapper) HashMap(java.util.HashMap) Map(java.util.Map)

Example 10 with FieldMapper

use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.

In class PostingsHighlighter, method highlight:

/**
 * Highlights a field using the postings highlighter, which requires the field to be
 * indexed with positions and offsets in the postings list. Per-mapper formatter state
 * is cached in the hit context under CACHE_KEY.
 *
 * @throws IllegalArgumentException     if the field is not indexed with offsets in postings
 * @throws FetchPhaseExecutionException if reading or highlighting the field fails
 */
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    FieldMapper fieldMapper = highlighterContext.mapper;
    SearchContextHighlight.Field field = highlighterContext.field;
    if (canHighlight(fieldMapper) == false) {
        throw new IllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter");
    }
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
    }
    HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
    MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
    if (mapperHighlighterEntry == null) {
        Encoder encoder = field.fieldOptions().encoder().equals("html") ? Encoders.HTML : Encoders.DEFAULT;
        CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder);
        mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter);
        // Store the new entry so subsequent lookups for this mapper actually hit the cache;
        // previously the entry was rebuilt on every call, making the cache ineffective.
        highlighterEntry.mappers.put(fieldMapper, mapperHighlighterEntry);
    }
    List<Snippet> snippets = new ArrayList<>();
    int numberOfFragments;
    try {
        Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
        List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
        CustomPostingsHighlighter highlighter;
        if (field.fieldOptions().numberOfFragments() == 0) {
            //we use a control char to separate values, which is the only char that the custom break iterator breaks the text on,
            //so we don't lose the distinction between the different values of a field and we get back a snippet per value
            String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.NULL_SEPARATOR);
            CustomSeparatorBreakIterator breakIterator = new CustomSeparatorBreakIterator(HighlightUtils.NULL_SEPARATOR);
            highlighter = new CustomPostingsHighlighter(analyzer, mapperHighlighterEntry.passageFormatter, breakIterator, fieldValue, field.fieldOptions().noMatchSize() > 0);
            //we are highlighting the whole content, one snippet per value
            numberOfFragments = fieldValues.size();
        } else {
            //using paragraph separator we make sure that each field value holds a discrete passage for highlighting
            String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.PARAGRAPH_SEPARATOR);
            highlighter = new CustomPostingsHighlighter(analyzer, mapperHighlighterEntry.passageFormatter, fieldValue, field.fieldOptions().noMatchSize() > 0);
            numberOfFragments = field.fieldOptions().numberOfFragments();
        }
        IndexSearcher searcher = new IndexSearcher(hitContext.reader());
        Snippet[] fieldSnippets = highlighter.highlightField(fieldMapper.fieldType().name(), highlighterContext.query, searcher, hitContext.docId(), numberOfFragments);
        for (Snippet fieldSnippet : fieldSnippets) {
            if (Strings.hasText(fieldSnippet.getText())) {
                snippets.add(fieldSnippet);
            }
        }
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
    }
    snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
    if (field.fieldOptions().scoreOrdered()) {
        //let's sort the snippets by score if needed
        CollectionUtil.introSort(snippets, new Comparator<Snippet>() {

            @Override
            public int compare(Snippet o1, Snippet o2) {
                return (int) Math.signum(o2.getScore() - o1.getScore());
            }
        });
    }
    String[] fragments = new String[snippets.size()];
    for (int i = 0; i < fragments.length; i++) {
        fragments[i] = snippets.get(i).getText();
    }
    if (fragments.length > 0) {
        return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
    }
    return null;
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) ArrayList(java.util.ArrayList) SearchContext(org.elasticsearch.search.internal.SearchContext) Analyzer(org.apache.lucene.analysis.Analyzer) CustomSeparatorBreakIterator(org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator) CustomPostingsHighlighter(org.apache.lucene.search.postingshighlight.CustomPostingsHighlighter) Encoder(org.apache.lucene.search.highlight.Encoder) FetchSubPhase(org.elasticsearch.search.fetch.FetchSubPhase) CustomPassageFormatter(org.apache.lucene.search.postingshighlight.CustomPassageFormatter) Snippet(org.apache.lucene.search.highlight.Snippet) IOException(java.io.IOException) FetchPhaseExecutionException(org.elasticsearch.search.fetch.FetchPhaseExecutionException) FieldMapper(org.elasticsearch.index.mapper.FieldMapper)

Aggregations

FieldMapper (org.elasticsearch.index.mapper.FieldMapper)23 DocumentMapper (org.elasticsearch.index.mapper.DocumentMapper)15 IndexableField (org.apache.lucene.index.IndexableField)12 ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument)12 CompressedXContent (org.elasticsearch.common.compress.CompressedXContent)9 MappedFieldType (org.elasticsearch.index.mapper.MappedFieldType)8 IOException (java.io.IOException)4 Analyzer (org.apache.lucene.analysis.Analyzer)4 Encoder (org.apache.lucene.search.highlight.Encoder)4 BytesRef (org.apache.lucene.util.BytesRef)4 BytesArray (org.elasticsearch.common.bytes.BytesArray)4 IndexService (org.elasticsearch.index.IndexService)4 Document (org.elasticsearch.index.mapper.ParseContext.Document)4 FetchPhaseExecutionException (org.elasticsearch.search.fetch.FetchPhaseExecutionException)4 FetchSubPhase (org.elasticsearch.search.fetch.FetchSubPhase)4 SearchContext (org.elasticsearch.search.internal.SearchContext)4 ArrayList (java.util.ArrayList)3 HashMap (java.util.HashMap)3 Map (java.util.Map)2 IndexSearcher (org.apache.lucene.search.IndexSearcher)2