
Example 1 with DirectSpellChecker

Use of org.apache.lucene.search.spell.DirectSpellChecker in project elasticsearch by elastic.

In class TermSuggester, method innerExecute:

@Override
public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare) throws IOException {
    DirectSpellChecker directSpellChecker = suggestion.getDirectSpellCheckerSettings().createDirectSpellChecker();
    final IndexReader indexReader = searcher.getIndexReader();
    TermSuggestion response = new TermSuggestion(name, suggestion.getSize(), suggestion.getDirectSpellCheckerSettings().sort());
    List<Token> tokens = queryTerms(suggestion, spare);
    for (Token token : tokens) {
        // TODO: Extend DirectSpellChecker in 4.1, to get the raw suggested words as BytesRef
        SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode());
        Text key = new Text(new BytesArray(token.term.bytes()));
        TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
        for (SuggestWord suggestWord : suggestedWords) {
            Text word = new Text(suggestWord.string);
            resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
        }
        response.addTerm(resultEntry);
    }
    return response;
}
Also used : BytesArray(org.elasticsearch.common.bytes.BytesArray) IndexReader(org.apache.lucene.index.IndexReader) SuggestWord(org.apache.lucene.search.spell.SuggestWord) Text(org.elasticsearch.common.text.Text) DirectSpellChecker(org.apache.lucene.search.spell.DirectSpellChecker)
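
For context, the core Lucene call that TermSuggester relies on can also be exercised on its own. Below is a minimal, self-contained sketch; the field name, sample documents, and the misspelled query term are illustrative and not taken from Elasticsearch:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.SuggestMode;
import org.apache.lucene.search.spell.SuggestWord;
import org.apache.lucene.store.RAMDirectory;

public class DirectSpellCheckerExample {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        for (String line : new String[] { "American Ace", "Captain America", "Quasar" }) {
            Document doc = new Document();
            doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
            writer.addDocument(doc);
        }
        DirectoryReader reader = DirectoryReader.open(writer);
        DirectSpellChecker spellChecker = new DirectSpellChecker();
        // Ask for up to 5 corrections of a term that does not occur in the index itself.
        SuggestWord[] suggestions = spellChecker.suggestSimilar(
                new Term("body", "amerrican"), 5, reader, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);
        for (SuggestWord word : suggestions) {
            System.out.println(word.string + " (freq=" + word.freq + ", score=" + word.score + ")");
        }
        reader.close();
        writer.close();
        dir.close();
    }
}

With DirectSpellChecker's defaults (two edits, prefix length 1, accuracy 0.5) this should print "american" as the top suggestion, mirroring what TermSuggester wraps into TermSuggestion options above.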

Example 2 with DirectSpellChecker

Use of org.apache.lucene.search.spell.DirectSpellChecker in project elasticsearch by elastic.

In class DirectSpellcheckerSettings, method createDirectSpellChecker:

public DirectSpellChecker createDirectSpellChecker() {
    DirectSpellChecker directSpellChecker = new DirectSpellChecker();
    directSpellChecker.setAccuracy(accuracy());
    Comparator<SuggestWord> comparator;
    switch(sort()) {
        case SCORE:
            comparator = SCORE_COMPARATOR;
            break;
        case FREQUENCY:
            comparator = LUCENE_FREQUENCY;
            break;
        default:
            throw new IllegalArgumentException("Illegal suggest sort: " + sort());
    }
    directSpellChecker.setComparator(comparator);
    directSpellChecker.setDistance(stringDistance());
    directSpellChecker.setMaxEdits(maxEdits());
    directSpellChecker.setMaxInspections(maxInspections());
    directSpellChecker.setMaxQueryFrequency(maxTermFreq());
    directSpellChecker.setMinPrefix(prefixLength());
    directSpellChecker.setMinQueryLength(minWordLength());
    directSpellChecker.setThresholdFrequency(minDocFreq());
    directSpellChecker.setLowerCaseTerms(false);
    return directSpellChecker;
}
Also used : SuggestWord(org.apache.lucene.search.spell.SuggestWord) DirectSpellChecker(org.apache.lucene.search.spell.DirectSpellChecker)
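
SCORE_COMPARATOR and LUCENE_FREQUENCY above are Elasticsearch-side constants; the comparable classes in plain Lucene are SuggestWordScoreComparator and SuggestWordFrequencyComparator. The following is a hedged sketch of configuring a DirectSpellChecker by hand with the same knobs, using illustrative values rather than the Elasticsearch settings object:

import java.util.Comparator;

import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.LevenshteinDistance;
import org.apache.lucene.search.spell.SuggestWord;
import org.apache.lucene.search.spell.SuggestWordFrequencyComparator;
import org.apache.lucene.search.spell.SuggestWordScoreComparator;

public class SpellCheckerConfigSketch {

    public static DirectSpellChecker create(boolean sortByScore) {
        DirectSpellChecker checker = new DirectSpellChecker();
        // Score ordering ranks candidates by similarity; frequency ordering prefers
        // the more common candidate terms.
        Comparator<SuggestWord> comparator = sortByScore
                ? new SuggestWordScoreComparator()
                : new SuggestWordFrequencyComparator();
        checker.setComparator(comparator);
        checker.setAccuracy(0.5f);                      // minimum similarity a candidate must reach
        checker.setDistance(new LevenshteinDistance()); // pluggable StringDistance implementation
        checker.setMaxEdits(2);                         // at most two edits between term and candidate
        checker.setMaxInspections(5);                   // candidates inspected per result slot
        checker.setMaxQueryFrequency(0.01f);            // treat frequent query terms as already correct
        checker.setMinPrefix(1);                        // candidates must share the leading character
        checker.setMinQueryLength(4);                   // ignore very short query terms
        checker.setThresholdFrequency(0f);              // minimum doc frequency a candidate must have
        checker.setLowerCaseTerms(false);               // assume the analyzer already lower-cases terms
        return checker;
    }
}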

Example 3 with DirectSpellChecker

Use of org.apache.lucene.search.spell.DirectSpellChecker in project elasticsearch by elastic.

In class NoisyChannelSpellCheckerTests, method testMultiGenerator:

public void testMultiGenerator() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    Map<String, Analyzer> mapping = new HashMap<>();
    mapping.put("body_ngram", new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            ShingleFilter tf = new ShingleFilter(t, 2, 3);
            tf.setOutputUnigrams(false);
            return new TokenStreamComponents(t, new LowerCaseFilter(tf));
        }
    });
    mapping.put("body", new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            return new TokenStreamComponents(t, new LowerCaseFilter(t));
        }
    });
    mapping.put("body_reverse", new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new StandardTokenizer();
            return new TokenStreamComponents(t, new ReverseStringFilter(new LowerCaseFilter(t)));
        }
    });
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
    IndexWriterConfig conf = new IndexWriterConfig(wrapper);
    IndexWriter writer = new IndexWriter(dir, conf);
    String[] strings = new String[] { "Xorr the God-Jewel", "Grog the God-Crusher", "Xorn", "Walter Newell", "Wanda Maximoff", "Captain America", "American Ace", "Wundarr the Aquarian", "Will o' the Wisp", "Xemnu the Titan", "Fantastic Four", "Quasar", "Quasar II" };
    for (String line : strings) {
        Document doc = new Document();
        doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
        doc.add(new Field("body_reverse", line, TextField.TYPE_NOT_STORED));
        doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
        writer.addDocument(doc);
    }
    DirectoryReader ir = DirectoryReader.open(writer);
    LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
    NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
    DirectSpellChecker spellchecker = new DirectSpellChecker();
    spellchecker.setMinQueryLength(1);
    DirectCandidateGenerator forward = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10);
    DirectCandidateGenerator reverse = new DirectCandidateGenerator(spellchecker, "body_reverse", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10, wrapper, wrapper, MultiFields.getTerms(ir, "body_reverse"));
    CandidateGenerator generator = new MultiCandidateGeneratorWrapper(10, forward, reverse);
    Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
    generator = new MultiCandidateGeneratorWrapper(5, forward, reverse);
    corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), forward, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
    // only use forward with constant prefix
    assertThat(corrections.length, equalTo(0));
    corrections = suggester.getCorrections(wrapper, new BytesRef("america cae"), generator, 2, 1, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
    assertThat(corrections.length, equalTo(4));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
    assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("zorr the god jewel"));
    assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("four the god jewel"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
    corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
    // Test a special case where one of the suggested terms is unchanged by the postFilter; 'II' here is unchanged by the reverse analyzer.
    corrections = suggester.getCorrections(wrapper, new BytesRef("Quazar II"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
    assertThat(corrections.length, equalTo(1));
    assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("quasar ii"));
}
Also used : HashMap(java.util.HashMap) WhitespaceAnalyzer(org.apache.lucene.analysis.core.WhitespaceAnalyzer) Analyzer(org.apache.lucene.analysis.Analyzer) Document(org.apache.lucene.document.Document) Field(org.apache.lucene.document.Field) TextField(org.apache.lucene.document.TextField) ShingleFilter(org.apache.lucene.analysis.shingle.ShingleFilter) ReverseStringFilter(org.apache.lucene.analysis.reverse.ReverseStringFilter) Tokenizer(org.apache.lucene.analysis.Tokenizer) StandardTokenizer(org.apache.lucene.analysis.standard.StandardTokenizer) DirectSpellChecker(org.apache.lucene.search.spell.DirectSpellChecker) BytesRef(org.apache.lucene.util.BytesRef) WhitespaceAnalyzer(org.apache.lucene.analysis.core.WhitespaceAnalyzer) DirectoryReader(org.apache.lucene.index.DirectoryReader) RAMDirectory(org.apache.lucene.store.RAMDirectory) PerFieldAnalyzerWrapper(org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper) IndexWriter(org.apache.lucene.index.IndexWriter) StandardTokenizer(org.apache.lucene.analysis.standard.StandardTokenizer) LowerCaseFilter(org.apache.lucene.analysis.LowerCaseFilter) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)
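
The body_ngram field in this test feeds the LaplaceScorer with word shingles rather than single terms. To make that concrete, here is a small, self-contained sketch (class name and sample input are illustrative) that runs the same shingle chain over one of the test strings and prints the tokens it produces:

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ShingleDemo {
    public static void main(String[] args) throws Exception {
        Analyzer shingles = new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                Tokenizer t = new StandardTokenizer();
                ShingleFilter tf = new ShingleFilter(t, 2, 3);
                tf.setOutputUnigrams(false);
                return new TokenStreamComponents(t, new LowerCaseFilter(tf));
            }
        };
        try (TokenStream ts = shingles.tokenStream("body_ngram", new StringReader("Xorr the God-Jewel"))) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            // Expected tokens: "xorr the", "xorr the god", "the god", "the god jewel", "god jewel"
            while (ts.incrementToken()) {
                System.out.println(term.toString());
            }
            ts.end();
        }
    }
}

Each multi-word shingle becomes a term in body_ngram, which is what the word scorer draws on when ranking candidate corrections.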

Example 4 with DirectSpellChecker

Use of org.apache.lucene.search.spell.DirectSpellChecker in project OpenGrok by OpenGrok.

In class SearchHelper, method prepareExec:

/**
 * Create the searcher to use w.r.t. currently set parameters and the given
 * projects. Does not produce any {@link #redirect} link. It also does
 * nothing if {@link #redirect} or {@link #errorMsg} have a
 * non-{@code null} value.
 * <p>
 * Parameters which should be populated/set at this time:
 * <ul>
 * <li>{@link #builder}</li> <li>{@link #dataRoot}</li>
 * <li>{@link #order} (falls back to relevance if unset)</li>
 * <li>{@link #parallel} (default: false)</li> </ul> Populates/sets: <ul>
 * <li>{@link #query}</li> <li>{@link #searcher}</li> <li>{@link #sort}</li>
 * <li>{@link #projects}</li> <li>{@link #errorMsg} if an error occurs</li>
 * </ul>
 *
 * @param projects project names. If empty, a no-project setup
 * is assumed (i.e. DATA_ROOT/index will be used instead of possible
 * multiple DATA_ROOT/$project/index). If the set contains projects
 * not known in the configuration or projects not yet indexed,
 * an error will be returned in {@link #errorMsg}.
 * @return this instance
 */
public SearchHelper prepareExec(SortedSet<String> projects) {
    if (redirect != null || errorMsg != null) {
        return this;
    }
    // the Query created by the QueryBuilder
    try {
        indexDir = new File(dataRoot, IndexDatabase.INDEX_DIR);
        query = builder.build();
        if (projects == null) {
            errorMsg = "No project selected!";
            return this;
        }
        this.projects = projects;
        if (projects.isEmpty()) {
            // no project setup
            FSDirectory dir = FSDirectory.open(indexDir.toPath());
            searcher = new IndexSearcher(DirectoryReader.open(dir));
            closeOnDestroy = true;
        } else {
            // Check list of project names first to make sure all of them
            // are valid and indexed.
            closeOnDestroy = false;
            Set<String> invalidProjects = projects.stream().filter(proj -> (Project.getByName(proj) == null)).collect(Collectors.toSet());
            if (invalidProjects.size() > 0) {
                errorMsg = "Project list contains invalid projects: " + String.join(", ", invalidProjects);
                return this;
            }
            Set<Project> notIndexedProjects = projects.stream().map(x -> Project.getByName(x)).filter(proj -> !proj.isIndexed()).collect(Collectors.toSet());
            if (notIndexedProjects.size() > 0) {
                errorMsg = "Some of the projects to be searched are not indexed yet: " + String.join(", ", notIndexedProjects.stream().map(proj -> proj.getName()).collect(Collectors.toSet()));
                return this;
            }
            // We use MultiReader even for single project. This should
            // not matter given that MultiReader is just a cheap wrapper
            // around set of IndexReader objects.
            MultiReader multireader = RuntimeEnvironment.getInstance().getMultiReader(projects, searcherList);
            if (multireader != null) {
                searcher = new IndexSearcher(multireader);
            } else {
                errorMsg = "Failed to initialize search. Check the index.";
                return this;
            }
        }
        // Most probably they are not reused. SearcherLifetimeManager might help here.
        switch(order) {
            case LASTMODIFIED:
                sort = new Sort(new SortField(QueryBuilder.DATE, SortField.Type.STRING, true));
                break;
            case BY_PATH:
                sort = new Sort(new SortField(QueryBuilder.FULLPATH, SortField.Type.STRING));
                break;
            default:
                sort = Sort.RELEVANCE;
                break;
        }
        checker = new DirectSpellChecker();
    } catch (ParseException e) {
        errorMsg = PARSE_ERROR_MSG + e.getMessage();
    } catch (FileNotFoundException e) {
        // errorMsg = "Index database(s) not found: " + e.getMessage();
        errorMsg = "Index database(s) not found.";
    } catch (IOException e) {
        errorMsg = e.getMessage();
    }
    return this;
}
Also used : SuperIndexSearcher(org.opensolaris.opengrok.configuration.SuperIndexSearcher) IndexSearcher(org.apache.lucene.search.IndexSearcher) MultiReader(org.apache.lucene.index.MultiReader) Query(org.apache.lucene.search.Query) ParseException(org.apache.lucene.queryparser.classic.ParseException) CompatibleAnalyser(org.opensolaris.opengrok.analysis.CompatibleAnalyser) SuggestMode(org.apache.lucene.search.spell.SuggestMode) SortedSet(java.util.SortedSet) ScoreDoc(org.apache.lucene.search.ScoreDoc) IOUtils(org.opensolaris.opengrok.util.IOUtils) Term(org.apache.lucene.index.Term) QueryBuilder(org.opensolaris.opengrok.search.QueryBuilder) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) Level(java.util.logging.Level) LoggerFactory(org.opensolaris.opengrok.logger.LoggerFactory) Document(org.apache.lucene.document.Document) Definitions(org.opensolaris.opengrok.analysis.Definitions) Project(org.opensolaris.opengrok.configuration.Project) RuntimeEnvironment(org.opensolaris.opengrok.configuration.RuntimeEnvironment) Map(java.util.Map) SortField(org.apache.lucene.search.SortField) FSDirectory(org.apache.lucene.store.FSDirectory) AnalyzerGuru(org.opensolaris.opengrok.analysis.AnalyzerGuru) SuggestWord(org.apache.lucene.search.spell.SuggestWord) SuperIndexSearcher(org.opensolaris.opengrok.configuration.SuperIndexSearcher) Sort(org.apache.lucene.search.Sort) DirectoryReader(org.apache.lucene.index.DirectoryReader) DirectSpellChecker(org.apache.lucene.search.spell.DirectSpellChecker) Set(java.util.Set) IOException(java.io.IOException) Summarizer(org.opensolaris.opengrok.search.Summarizer) Logger(java.util.logging.Logger) Collectors(java.util.stream.Collectors) File(java.io.File) FileNotFoundException(java.io.FileNotFoundException) List(java.util.List) TermQuery(org.apache.lucene.search.TermQuery) BooleanQuery(org.apache.lucene.search.BooleanQuery) IndexDatabase(org.opensolaris.opengrok.index.IndexDatabase) HistoryContext(org.opensolaris.opengrok.search.context.HistoryContext) Pattern(java.util.regex.Pattern) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) Context(org.opensolaris.opengrok.search.context.Context) IndexReader(org.apache.lucene.index.IndexReader) IndexSearcher(org.apache.lucene.search.IndexSearcher) MultiReader(org.apache.lucene.index.MultiReader) FileNotFoundException(java.io.FileNotFoundException) FSDirectory(org.apache.lucene.store.FSDirectory) SortField(org.apache.lucene.search.SortField) IOException(java.io.IOException) Project(org.opensolaris.opengrok.configuration.Project) Sort(org.apache.lucene.search.Sort) ParseException(org.apache.lucene.queryparser.classic.ParseException) File(java.io.File) DirectSpellChecker(org.apache.lucene.search.spell.DirectSpellChecker)

Example 5 with DirectSpellChecker

Use of org.apache.lucene.search.spell.DirectSpellChecker in project OpenGrok by OpenGrok.

In class SearchHelper, method prepareExec:

/**
 * Create the searcher to use w.r.t. currently set parameters and the given
 * projects. Does not produce any {@link #redirect} link. It also does
 * nothing if {@link #redirect} or {@link #errorMsg} have a
 * non-{@code null} value.
 * <p>
 * Parameters which should be populated/set at this time:
 * <ul>
 * <li>{@link #builder}</li> <li>{@link #dataRoot}</li>
 * <li>{@link #order} (falls back to relevance if unset)</li>
 * </ul>
 * Populates/sets:
 * <ul>
 * <li>{@link #query}</li> <li>{@link #searcher}</li> <li>{@link #sort}</li>
 * <li>{@link #projects}</li> <li>{@link #errorMsg} if an error occurs</li>
 * </ul>
 *
 * @param projects project names. If empty, a no-project setup
 * is assumed (i.e. DATA_ROOT/index will be used instead of possible
 * multiple DATA_ROOT/$project/index). If the set contains projects
 * not known in the configuration or projects not yet indexed,
 * an error will be returned in {@link #errorMsg}.
 * @return this instance
 */
public SearchHelper prepareExec(SortedSet<String> projects) {
    if (redirect != null || errorMsg != null) {
        return this;
    }
    settingsHelper = null;
    // the Query created by the QueryBuilder
    try {
        indexDir = new File(dataRoot, IndexDatabase.INDEX_DIR);
        query = builder.build();
        if (projects == null) {
            errorMsg = "No project selected!";
            return this;
        }
        this.projects = projects;
        if (projects.isEmpty()) {
            // no project setup
            FSDirectory dir = FSDirectory.open(indexDir.toPath());
            reader = DirectoryReader.open(dir);
            searcher = new IndexSearcher(reader);
            closeOnDestroy = true;
        } else {
            // Check list of project names first to make sure all of them
            // are valid and indexed.
            closeOnDestroy = false;
            Set<String> invalidProjects = projects.stream().filter(proj -> (Project.getByName(proj) == null)).collect(Collectors.toSet());
            if (!invalidProjects.isEmpty()) {
                errorMsg = "Project list contains invalid projects: " + String.join(", ", invalidProjects);
                return this;
            }
            Set<Project> notIndexedProjects = projects.stream().map(Project::getByName).filter(proj -> !proj.isIndexed()).collect(Collectors.toSet());
            if (!notIndexedProjects.isEmpty()) {
                errorMsg = "Some of the projects to be searched are not indexed yet: " + String.join(", ", notIndexedProjects.stream().map(Project::getName).collect(Collectors.toSet()));
                return this;
            }
            // We use MultiReader even for single project. This should
            // not matter given that MultiReader is just a cheap wrapper
            // around set of IndexReader objects.
            reader = RuntimeEnvironment.getInstance().getMultiReader(projects, searcherList);
            if (reader != null) {
                searcher = new IndexSearcher(reader);
            } else {
                errorMsg = "Failed to initialize search. Check the index";
                if (!projects.isEmpty()) {
                    errorMsg += " for projects: " + String.join(", ", projects);
                }
                return this;
            }
        }
        // Most probably they are not reused. SearcherLifetimeManager might help here.
        switch(order) {
            case LASTMODIFIED:
                sort = new Sort(new SortField(QueryBuilder.DATE, SortField.Type.STRING, true));
                break;
            case BY_PATH:
                sort = new Sort(new SortField(QueryBuilder.FULLPATH, SortField.Type.STRING));
                break;
            default:
                sort = Sort.RELEVANCE;
                break;
        }
        checker = new DirectSpellChecker();
    } catch (ParseException e) {
        errorMsg = PARSE_ERROR_MSG + e.getMessage();
    } catch (FileNotFoundException e) {
        errorMsg = "Index database not found. Check the index";
        if (!projects.isEmpty()) {
            errorMsg += " for projects: " + String.join(", ", projects);
        }
        errorMsg += "; " + e.getMessage();
    } catch (IOException e) {
        errorMsg = e.getMessage();
    }
    return this;
}
Also used : SuperIndexSearcher(org.opengrok.indexer.configuration.SuperIndexSearcher) IndexSearcher(org.apache.lucene.search.IndexSearcher) Query(org.apache.lucene.search.Query) HistoryContext(org.opengrok.indexer.search.context.HistoryContext) SortedSet(java.util.SortedSet) ScoreDoc(org.apache.lucene.search.ScoreDoc) Context(org.opengrok.indexer.search.context.Context) SettingsHelper(org.opengrok.indexer.search.SettingsHelper) MatchesUtils(org.apache.lucene.search.MatchesUtils) IndexableField(org.apache.lucene.index.IndexableField) Summarizer(org.opengrok.indexer.search.Summarizer) Term(org.apache.lucene.index.Term) Project(org.opengrok.indexer.configuration.Project) ForbiddenSymlinkException(org.opengrok.indexer.util.ForbiddenSymlinkException) Document(org.apache.lucene.document.Document) Map(java.util.Map) RuntimeEnvironment(org.opengrok.indexer.configuration.RuntimeEnvironment) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) SortField(org.apache.lucene.search.SortField) Path(java.nio.file.Path) SuggestWord(org.apache.lucene.search.spell.SuggestWord) MatchesIterator(org.apache.lucene.search.MatchesIterator) Definitions(org.opengrok.indexer.analysis.Definitions) Sort(org.apache.lucene.search.Sort) DirectoryReader(org.apache.lucene.index.DirectoryReader) DirectSpellChecker(org.apache.lucene.search.spell.DirectSpellChecker) Set(java.util.Set) Logger(java.util.logging.Logger) Collectors(java.util.stream.Collectors) IndexDatabase(org.opengrok.indexer.index.IndexDatabase) FileNotFoundException(java.io.FileNotFoundException) List(java.util.List) SuperIndexSearcher(org.opengrok.indexer.configuration.SuperIndexSearcher) AbstractAnalyzer(org.opengrok.indexer.analysis.AbstractAnalyzer) IndexedSymlink(org.opengrok.indexer.index.IndexedSymlink) LoggerFactory(org.opengrok.indexer.logger.LoggerFactory) Pattern(java.util.regex.Pattern) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) IndexReader(org.apache.lucene.index.IndexReader) IndexSearcher(org.apache.lucene.search.IndexSearcher) ReaderUtil(org.apache.lucene.index.ReaderUtil) ParseException(org.apache.lucene.queryparser.classic.ParseException) SuggestMode(org.apache.lucene.search.spell.SuggestMode) Weight(org.apache.lucene.search.Weight) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) Level(java.util.logging.Level) CompatibleAnalyser(org.opengrok.indexer.analysis.CompatibleAnalyser) FSDirectory(org.apache.lucene.store.FSDirectory) TopDocs(org.apache.lucene.search.TopDocs) AnalyzerGuru(org.opengrok.indexer.analysis.AnalyzerGuru) IOUtils(org.opengrok.indexer.util.IOUtils) QueryBuilder(org.opengrok.indexer.search.QueryBuilder) IOException(java.io.IOException) File(java.io.File) ScoreMode(org.apache.lucene.search.ScoreMode) TermQuery(org.apache.lucene.search.TermQuery) Paths(java.nio.file.Paths) Matches(org.apache.lucene.search.Matches) FileNotFoundException(java.io.FileNotFoundException) FSDirectory(org.apache.lucene.store.FSDirectory) SortField(org.apache.lucene.search.SortField) IOException(java.io.IOException) Project(org.opengrok.indexer.configuration.Project) Sort(org.apache.lucene.search.Sort) ParseException(org.apache.lucene.queryparser.classic.ParseException) File(java.io.File) DirectSpellChecker(org.apache.lucene.search.spell.DirectSpellChecker)
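
Both OpenGrok variants only create the DirectSpellChecker and stash it in the checker field; the code that turns it into "did you mean" suggestions is outside this excerpt. Purely as an illustration (the helper class, method, and signature below are assumptions, not OpenGrok's actual API), the stored checker and searcher could be combined like this:

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.SuggestMode;
import org.apache.lucene.search.spell.SuggestWord;

// Hypothetical helper, not OpenGrok code.
final class SpellingSuggestions {

    private SpellingSuggestions() {
    }

    /** Returns up to {@code count} alternative spellings for {@code term} in {@code field}. */
    static String[] suggest(DirectSpellChecker checker, IndexSearcher searcher,
            String field, String term, int count) throws IOException {
        IndexReader reader = searcher.getIndexReader();
        SuggestWord[] words = checker.suggestSimilar(
                new Term(field, term), count, reader, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);
        String[] result = new String[words.length];
        for (int i = 0; i < words.length; i++) {
            result[i] = words[i].string;
        }
        return result;
    }
}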

Aggregations

DirectSpellChecker (org.apache.lucene.search.spell.DirectSpellChecker): 8 usages
Document (org.apache.lucene.document.Document): 5 usages
DirectoryReader (org.apache.lucene.index.DirectoryReader): 5 usages
IOException (java.io.IOException): 4 usages
IndexReader (org.apache.lucene.index.IndexReader): 4 usages
SuggestWord (org.apache.lucene.search.spell.SuggestWord): 4 usages
ArrayList (java.util.ArrayList): 3 usages
HashMap (java.util.HashMap): 3 usages
Map (java.util.Map): 3 usages
Analyzer (org.apache.lucene.analysis.Analyzer): 3 usages
LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter): 3 usages
Tokenizer (org.apache.lucene.analysis.Tokenizer): 3 usages
WhitespaceAnalyzer (org.apache.lucene.analysis.core.WhitespaceAnalyzer): 3 usages
PerFieldAnalyzerWrapper (org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper): 3 usages
ShingleFilter (org.apache.lucene.analysis.shingle.ShingleFilter): 3 usages
StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer): 3 usages
BytesRef (org.apache.lucene.util.BytesRef): 3 usages
File (java.io.File): 2 usages
FileNotFoundException (java.io.FileNotFoundException): 2 usages
StringReader (java.io.StringReader): 2 usages