
Example 1 with IndexSearcher

Use of org.apache.lucene.search.IndexSearcher in the project elasticsearch by elastic.

From the class Engine, method acquireSearcher:

/**
 * Returns a new searcher instance. The consumer of this
 * API is responsible for releasing the returned searcher in a
 * safe manner, preferably in a try/finally block.
 *
 * @see Searcher#close()
 */
public final Searcher acquireSearcher(String source) throws EngineException {
    boolean success = false;
    /* Acquire order here is store -> manager since we need
     * to make sure that the store is not closed before
     * the searcher is acquired. */
    store.incRef();
    try {
        // can never be null
        final SearcherManager manager = getSearcherManager();
        /* This might throw NPE but that's fine we will run ensureOpen()
         * in the catch block and throw the right exception */
        final IndexSearcher searcher = manager.acquire();
        try {
            final Searcher retVal = newSearcher(source, searcher, manager);
            success = true;
            return retVal;
        } finally {
            if (!success) {
                manager.release(searcher);
            }
        }
    } catch (AlreadyClosedException ex) {
        throw ex;
    } catch (Exception ex) {
        // throw EngineCloseException here if we are already closed
        ensureOpen();
        logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex);
        throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex);
    } finally {
        if (!success) {
            // release the ref in the case of an error...
            store.decRef();
        }
    }
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) SearcherManager(org.apache.lucene.search.SearcherManager) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) NoSuchFileException(java.nio.file.NoSuchFileException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException)
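
The Javadoc above puts the release responsibility on the caller. A minimal usage sketch of that contract follows; the engine variable, the "my_source" label, and the query are illustrative, and Engine.Searcher is assumed to expose the wrapped Lucene searcher through searcher():

Engine.Searcher engineSearcher = engine.acquireSearcher("my_source");
try {
    // run the query against the wrapped Lucene IndexSearcher
    IndexSearcher searcher = engineSearcher.searcher();
    TopDocs topDocs = searcher.search(new TermQuery(new Term("field", "value")), 10);
    // ... consume topDocs ...
} finally {
    // releases the reference taken from the SearcherManager and the store ref acquired above
    engineSearcher.close();
}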

Example 2 with IndexSearcher

Use of org.apache.lucene.search.IndexSearcher in the project elasticsearch by elastic.

From the class PostingsHighlighter, method highlight:

@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    FieldMapper fieldMapper = highlighterContext.mapper;
    SearchContextHighlight.Field field = highlighterContext.field;
    if (canHighlight(fieldMapper) == false) {
        throw new IllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter");
    }
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
    }
    HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
    MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
    if (mapperHighlighterEntry == null) {
        Encoder encoder = field.fieldOptions().encoder().equals("html") ? Encoders.HTML : Encoders.DEFAULT;
        CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder);
        mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter);
    }
    List<Snippet> snippets = new ArrayList<>();
    int numberOfFragments;
    try {
        Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
        List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
        CustomPostingsHighlighter highlighter;
        if (field.fieldOptions().numberOfFragments() == 0) {
            //we use a control char to separate values, which is the only char that the custom break iterator breaks the text on,
            //so we don't lose the distinction between the different values of a field and we get back a snippet per value
            String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.NULL_SEPARATOR);
            CustomSeparatorBreakIterator breakIterator = new CustomSeparatorBreakIterator(HighlightUtils.NULL_SEPARATOR);
            highlighter = new CustomPostingsHighlighter(analyzer, mapperHighlighterEntry.passageFormatter, breakIterator, fieldValue, field.fieldOptions().noMatchSize() > 0);
            //we are highlighting the whole content, one snippet per value
            numberOfFragments = fieldValues.size();
        } else {
            //using paragraph separator we make sure that each field value holds a discrete passage for highlighting
            String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.PARAGRAPH_SEPARATOR);
            highlighter = new CustomPostingsHighlighter(analyzer, mapperHighlighterEntry.passageFormatter, fieldValue, field.fieldOptions().noMatchSize() > 0);
            numberOfFragments = field.fieldOptions().numberOfFragments();
        }
        IndexSearcher searcher = new IndexSearcher(hitContext.reader());
        Snippet[] fieldSnippets = highlighter.highlightField(fieldMapper.fieldType().name(), highlighterContext.query, searcher, hitContext.docId(), numberOfFragments);
        for (Snippet fieldSnippet : fieldSnippets) {
            if (Strings.hasText(fieldSnippet.getText())) {
                snippets.add(fieldSnippet);
            }
        }
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
    }
    snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
    if (field.fieldOptions().scoreOrdered()) {
        //let's sort the snippets by score if needed
        CollectionUtil.introSort(snippets, new Comparator<Snippet>() {

            @Override
            public int compare(Snippet o1, Snippet o2) {
                return (int) Math.signum(o2.getScore() - o1.getScore());
            }
        });
    }
    String[] fragments = new String[snippets.size()];
    for (int i = 0; i < fragments.length; i++) {
        fragments[i] = snippets.get(i).getText();
    }
    if (fragments.length > 0) {
        return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
    }
    return null;
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) ArrayList(java.util.ArrayList) SearchContext(org.elasticsearch.search.internal.SearchContext) Analyzer(org.apache.lucene.analysis.Analyzer) CustomSeparatorBreakIterator(org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator) CustomPostingsHighlighter(org.apache.lucene.search.postingshighlight.CustomPostingsHighlighter) Encoder(org.apache.lucene.search.highlight.Encoder) FetchSubPhase(org.elasticsearch.search.fetch.FetchSubPhase) CustomPassageFormatter(org.apache.lucene.search.postingshighlight.CustomPassageFormatter) Snippet(org.apache.lucene.search.highlight.Snippet) IOException(java.io.IOException) FetchPhaseExecutionException(org.elasticsearch.search.fetch.FetchPhaseExecutionException) FieldMapper(org.elasticsearch.index.mapper.FieldMapper)
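
In the numberOfFragments == 0 branch above, all values of a field are merged with a separator character and the break iterator is told to break only on that character, so one snippet comes back per original value. A small illustrative sketch of that mechanism (the field values below are made up; this mirrors mergeFieldValues(...) but is not the Elasticsearch helper itself):

// Illustrative only: two field values merged into one string with a single break point between them.
List<Object> fieldValues = Arrays.asList("first value", "second value");
String fieldValue = fieldValues.stream()
        .map(Object::toString)
        .collect(Collectors.joining(String.valueOf(HighlightUtils.NULL_SEPARATOR)));
// The break iterator breaks only at the separator char, never inside a value ...
CustomSeparatorBreakIterator breakIterator = new CustomSeparatorBreakIterator(HighlightUtils.NULL_SEPARATOR);
breakIterator.setText(fieldValue);
// ... so passage boundaries line up with value boundaries and the highlighter
// returns one snippet per original value (numberOfFragments = fieldValues.size()).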

Example 3 with IndexSearcher

Use of org.apache.lucene.search.IndexSearcher in the project elasticsearch by elastic.

From the class UnifiedHighlighter, method highlight:

@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    FieldMapper fieldMapper = highlighterContext.mapper;
    SearchContextHighlight.Field field = highlighterContext.field;
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
    }
    HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
    MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
    if (mapperHighlighterEntry == null) {
        Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
        CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder);
        mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter);
    }
    List<Snippet> snippets = new ArrayList<>();
    int numberOfFragments;
    try {
        Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
        List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
        fieldValues = fieldValues.stream().map(obj -> {
            if (obj instanceof BytesRef) {
                return fieldMapper.fieldType().valueForDisplay(obj).toString();
            } else {
                return obj;
            }
        }).collect(Collectors.toList());
        IndexSearcher searcher = new IndexSearcher(hitContext.reader());
        CustomUnifiedHighlighter highlighter;
        if (field.fieldOptions().numberOfFragments() == 0) {
            // we use a control char to separate values, which is the only char that the custom break iterator
            // breaks the text on, so we don't lose the distinction between the different values of a field and we
            // get back a snippet per value
            String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
            org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator breakIterator = new org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
            highlighter = new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter, field.fieldOptions().boundaryScannerLocale(), breakIterator, fieldValue, field.fieldOptions().noMatchSize());
            // we are highlighting the whole content, one snippet per value
            numberOfFragments = fieldValues.size();
        } else {
            //using paragraph separator we make sure that each field value holds a discrete passage for highlighting
            String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
            BreakIterator bi = getBreakIterator(field);
            highlighter = new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter, field.fieldOptions().boundaryScannerLocale(), bi, fieldValue, field.fieldOptions().noMatchSize());
            numberOfFragments = field.fieldOptions().numberOfFragments();
        }
        if (field.fieldOptions().requireFieldMatch()) {
            final String fieldName = highlighterContext.fieldName;
            highlighter.setFieldMatcher((name) -> fieldName.equals(name));
        } else {
            highlighter.setFieldMatcher((name) -> true);
        }
        Snippet[] fieldSnippets = highlighter.highlightField(highlighterContext.fieldName, highlighterContext.query, hitContext.docId(), numberOfFragments);
        for (Snippet fieldSnippet : fieldSnippets) {
            if (Strings.hasText(fieldSnippet.getText())) {
                snippets.add(fieldSnippet);
            }
        }
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
    }
    snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
    if (field.fieldOptions().scoreOrdered()) {
        //let's sort the snippets by score if needed
        CollectionUtil.introSort(snippets, (o1, o2) -> Double.compare(o2.getScore(), o1.getScore()));
    }
    String[] fragments = new String[snippets.size()];
    for (int i = 0; i < fragments.length; i++) {
        fragments[i] = snippets.get(i).getText();
    }
    if (fragments.length > 0) {
        return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
    }
    return null;
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) ArrayList(java.util.ArrayList) SearchContext(org.elasticsearch.search.internal.SearchContext) Analyzer(org.apache.lucene.analysis.Analyzer) BreakIterator(java.text.BreakIterator) Encoder(org.apache.lucene.search.highlight.Encoder) FetchSubPhase(org.elasticsearch.search.fetch.FetchSubPhase) CustomPassageFormatter(org.apache.lucene.search.uhighlight.CustomPassageFormatter) BytesRef(org.apache.lucene.util.BytesRef) CustomUnifiedHighlighter(org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter) Snippet(org.apache.lucene.search.highlight.Snippet) IOException(java.io.IOException) FetchPhaseExecutionException(org.elasticsearch.search.fetch.FetchPhaseExecutionException) FieldMapper(org.elasticsearch.index.mapper.FieldMapper)

Example 4 with IndexSearcher

Use of org.apache.lucene.search.IndexSearcher in the project elasticsearch by elastic.

From the class TermVectorsUnitTests, method writeEmptyTermVector:

private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields fields = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(fields, null, flags, fields);
    outResponse.setExists(true);
    dr.close();
    dir.close();
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) TermQuery(org.apache.lucene.search.TermQuery) DirectoryReader(org.apache.lucene.index.DirectoryReader) Term(org.apache.lucene.index.Term) Document(org.apache.lucene.document.Document) Flag(org.elasticsearch.action.termvectors.TermVectorsRequest.Flag) FieldType(org.apache.lucene.document.FieldType) ScoreDoc(org.apache.lucene.search.ScoreDoc) TopDocs(org.apache.lucene.search.TopDocs) StringField(org.apache.lucene.document.StringField) Field(org.apache.lucene.document.Field) TextField(org.apache.lucene.document.TextField) Fields(org.apache.lucene.index.Fields) IndexWriter(org.apache.lucene.index.IndexWriter) StandardAnalyzer(org.apache.lucene.analysis.standard.StandardAnalyzer) Directory(org.apache.lucene.store.Directory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Example 5 with IndexSearcher

Use of org.apache.lucene.search.IndexSearcher in the project elasticsearch by elastic.

From the class TermVectorsUnitTests, method writeStandardTermVector:

private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("title", "the1 quick brown fox jumps over  the1 lazy dog", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over  the1 lazy dog", type));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    dr.close();
    dir.close();
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) TermQuery(org.apache.lucene.search.TermQuery) DirectoryReader(org.apache.lucene.index.DirectoryReader) Term(org.apache.lucene.index.Term) Document(org.apache.lucene.document.Document) Flag(org.elasticsearch.action.termvectors.TermVectorsRequest.Flag) FieldType(org.apache.lucene.document.FieldType) ScoreDoc(org.apache.lucene.search.ScoreDoc) TopDocs(org.apache.lucene.search.TopDocs) StringField(org.apache.lucene.document.StringField) Field(org.apache.lucene.document.Field) TextField(org.apache.lucene.document.TextField) Fields(org.apache.lucene.index.Fields) IndexWriter(org.apache.lucene.index.IndexWriter) StandardAnalyzer(org.apache.lucene.analysis.standard.StandardAnalyzer) Directory(org.apache.lucene.store.Directory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)
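
Both tests hand the Fields returned by dr.getTermVectors(doc) straight to the response. To inspect what was stored, the standard Lucene term-vector API walks it; a minimal sketch follows (the method name and the choice of the "title" field are illustrative, not part of the test):

private void readTitleTermVector(Fields termVectors) throws IOException {
    // per-field term vector; null if the field stored no vectors (e.g. the "id" field above)
    Terms terms = termVectors.terms("title");
    if (terms == null) {
        return;
    }
    TermsEnum termsEnum = terms.iterator();
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
        // ask for positions and offsets, both of which the FieldType above stored
        PostingsEnum postings = termsEnum.postings(null, PostingsEnum.POSITIONS | PostingsEnum.OFFSETS);
        // a term vector behaves like a single-document index, so advance to that one doc
        postings.nextDoc();
        int freq = postings.freq();
        for (int i = 0; i < freq; i++) {
            int position = postings.nextPosition();
            int startOffset = postings.startOffset();
            int endOffset = postings.endOffset();
            // position/startOffset/endOffset describe one occurrence of the term in the "title" value
        }
    }
}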

Aggregations

IndexSearcher (org.apache.lucene.search.IndexSearcher): 929
Document (org.apache.lucene.document.Document): 529
IndexReader (org.apache.lucene.index.IndexReader): 431
Directory (org.apache.lucene.store.Directory): 407
TopDocs (org.apache.lucene.search.TopDocs): 383
TermQuery (org.apache.lucene.search.TermQuery): 333
RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter): 311
Query (org.apache.lucene.search.Query): 300
Term (org.apache.lucene.index.Term): 287
MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery): 230
BooleanQuery (org.apache.lucene.search.BooleanQuery): 177
IOException (java.io.IOException): 155
Field (org.apache.lucene.document.Field): 140
IndexWriter (org.apache.lucene.index.IndexWriter): 139
ScoreDoc (org.apache.lucene.search.ScoreDoc): 136
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 122
Sort (org.apache.lucene.search.Sort): 114
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 111
ArrayList (java.util.ArrayList): 110
DirectoryReader (org.apache.lucene.index.DirectoryReader): 110