Example 21 with EnglishAnalyzer

Use of org.apache.lucene.analysis.en.EnglishAnalyzer in project Anserini by castorini.

The class RetrieveSentences, method search:

public Map<String, Float> search(SortedMap<Integer, String> topics, int numHits) throws IOException, ParseException {
    IndexSearcher searcher = new IndexSearcher(reader);
    // BM25 scoring model with k1 = 0.9 and b = 0.4
    Similarity similarity = new BM25Similarity(0.9f, 0.4f);
    searcher.setSimilarity(similarity);
    EnglishAnalyzer ea = new EnglishAnalyzer();
    QueryParser queryParser = new QueryParser(FIELD_BODY, ea);
    queryParser.setDefaultOperator(QueryParser.Operator.OR);
    Map<String, Float> scoredDocs = new LinkedHashMap<>();
    for (String queryString : topics.values()) {
        Query query = AnalyzerUtils.buildBagOfWordsQuery(FIELD_BODY, ea, queryString);
        TopDocs rs = searcher.search(query, numHits);
        ScoredDocuments docs = ScoredDocuments.fromTopDocs(rs, searcher);
        // All topics share one result map keyed by document id, so a document
        // retrieved for several topics keeps the score from the last topic processed.
        for (int i = 0; i < docs.documents.length; i++) {
            scoredDocs.put(docs.documents[i].getField(FIELD_ID).stringValue(), docs.scores[i]);
        }
    }
    return scoredDocs;
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) Similarity(org.apache.lucene.search.similarities.Similarity) BM25Similarity(org.apache.lucene.search.similarities.BM25Similarity) Query(org.apache.lucene.search.Query) ScoredDocuments(io.anserini.rerank.ScoredDocuments) EnglishAnalyzer(org.apache.lucene.analysis.en.EnglishAnalyzer) ScoreDoc(org.apache.lucene.search.ScoreDoc) TopDocs(org.apache.lucene.search.TopDocs) QueryParser(org.apache.lucene.queryparser.classic.QueryParser)
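
For context, here is a minimal, self-contained sketch of the same retrieval pattern (BM25 with k1 = 0.9 and b = 0.4, plus an EnglishAnalyzer-backed query) outside of Anserini. The index path ("lucene-index"), field name ("contents"), and query string are placeholders, not Anserini constants:

import java.nio.file.Paths;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.store.FSDirectory;

public class Bm25SearchSketch {
    public static void main(String[] args) throws Exception {
        // "lucene-index" and "contents" are placeholder names for this sketch.
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("lucene-index")))) {
            IndexSearcher searcher = new IndexSearcher(reader);
            searcher.setSimilarity(new BM25Similarity(0.9f, 0.4f));
            Query query = new QueryParser("contents", new EnglishAnalyzer()).parse("some query");
            for (ScoreDoc hit : searcher.search(query, 10).scoreDocs) {
                System.out.println(hit.doc + "\t" + hit.score);
            }
        }
    }
}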

Example 22 with EnglishAnalyzer

Use of org.apache.lucene.analysis.en.EnglishAnalyzer in project Anserini by castorini.

The class IdfPassageScorer, method score:

@Override
public void score(String query, Map<String, Float> sentences) throws Exception {
    EnglishAnalyzer englishAnalyzer = new EnglishAnalyzer(StopFilter.makeStopSet(stopWords));
    QueryParser queryParser = new QueryParser(LuceneDocumentGenerator.FIELD_BODY, englishAnalyzer);
    ClassicSimilarity similarity = new ClassicSimilarity();
    String escapedQuery = QueryParser.escape(query);
    Query question = queryParser.parse(escapedQuery);
    HashSet<String> questionTerms = new HashSet<>(Arrays.asList(question.toString().trim().toLowerCase().split("\\s+")));
    EnglishAnalyzer englishAnalyzerWithStop = new EnglishAnalyzer(CharArraySet.EMPTY_SET);
    QueryParser queryParserWithStop = new QueryParser(LuceneDocumentGenerator.FIELD_BODY, englishAnalyzerWithStop);
    Query questionWithStopWords = queryParserWithStop.parse(escapedQuery);
    HashSet<String> questionTermsIDF = new HashSet<>(Arrays.asList(questionWithStopWords.toString().trim().toLowerCase().split("\\s+")));
    // add the question terms to the termIDF Map
    for (String questionTerm : questionTermsIDF) {
        try {
            TermQuery q = (TermQuery) queryParserWithStop.parse(questionTerm);
            Term t = q.getTerm();
            double termIDF = similarity.idf(reader.docFreq(t), reader.numDocs());
            termIdfMap.put(questionTerm, String.valueOf(termIDF));
        } catch (Exception e) {
            // skip question terms that do not parse to a single TermQuery
            continue;
        }
    }
    // avoid duplicate passages
    HashSet<String> seenSentences = new HashSet<>();
    for (Map.Entry<String, Float> sent : sentences.entrySet()) {
        double idf = 0.0;
        HashSet<String> seenTerms = new HashSet<>();
        String[] terms = sent.getKey().toLowerCase().split("\\s+");
        for (String term : terms) {
            try {
                TermQuery q = (TermQuery) queryParser.parse(term);
                Term t = q.getTerm();
                double termIDF = similarity.idf(reader.docFreq(t), reader.numDocs());
                if (questionTerms.contains(t.toString()) && !seenTerms.contains(t.toString())) {
                    idf += termIDF;
                    seenTerms.add(t.toString());
                }
                TermQuery q2 = (TermQuery) queryParserWithStop.parse(term);
                Term t2 = q2.getTerm();
                double termIDFwithStop = similarity.idf(reader.docFreq(t2), reader.numDocs());
                termIdfMap.put(term, String.valueOf(termIDFwithStop));
            } catch (Exception e) {
                // terms the analyzer drops (e.g., stop words) fail the TermQuery cast; skip them
                continue;
            }
        }
        double weightedScore = idf + 0.0001 * sent.getValue();
        ScoredPassage scoredPassage = new ScoredPassage(sent.getKey(), weightedScore, sent.getValue());
    if ((scoredPassageHeap.size() < topPassages || weightedScore > scoredPassageHeap.peekLast().getScore()) && !seenSentences.contains(sent.getKey())) {
            if (scoredPassageHeap.size() == topPassages) {
                scoredPassageHeap.pollLast();
            }
            scoredPassageHeap.add(scoredPassage);
            seenSentences.add(sent.getKey());
        }
    }
}
Also used : ClassicSimilarity(org.apache.lucene.search.similarities.ClassicSimilarity) TermQuery(org.apache.lucene.search.TermQuery) Query(org.apache.lucene.search.Query) EnglishAnalyzer(org.apache.lucene.analysis.en.EnglishAnalyzer) Term(org.apache.lucene.index.Term) IOException(java.io.IOException) QueryParser(org.apache.lucene.queryparser.classic.QueryParser) HashMap(java.util.HashMap) Map(java.util.Map) HashSet(java.util.HashSet)
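
The scorer above is built around Lucene's classic IDF statistic. As a hedged sketch, the same value can be read for a single term without any query parsing; the index path and field name below are placeholders for illustration:

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.store.FSDirectory;

public class IdfLookupSketch {
    public static void main(String[] args) throws Exception {
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("lucene-index")))) {
            ClassicSimilarity similarity = new ClassicSimilarity();
            Term term = new Term("contents", "city");
            // Classic Lucene IDF: 1 + ln((docCount + 1) / (docFreq + 1)); rarer terms score higher.
            double idf = similarity.idf(reader.docFreq(term), reader.numDocs());
            System.out.println("idf(" + term.text() + ") = " + idf);
        }
    }
}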

Example 23 with EnglishAnalyzer

Use of org.apache.lucene.analysis.en.EnglishAnalyzer in project Anserini by castorini.

The class SearchWebCollection, method search:

/**
 * Writes a TREC submission file for the given topics.
 *
 * @param topics         queries, keyed by topic number
 * @param submissionFile path of the run file to write
 * @param similarity     scoring model to use
 * @param numHits        number of hits to retrieve per topic
 * @param cascade        reranker cascade applied to the initial results
 * @param useQueryParser parse queries with Lucene's QueryParser instead of building bag-of-words queries
 * @param keepstopwords  if true, analyze with an empty stop word set
 * @throws IOException if the index cannot be read or the run file cannot be written
 * @throws ParseException if a query fails to parse
 */
public void search(SortedMap<Integer, String> topics, String submissionFile, Similarity similarity, int numHits, RerankerCascade cascade, boolean useQueryParser, boolean keepstopwords) throws IOException, ParseException {
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setSimilarity(similarity);
    final String runTag = "BM25_EnglishAnalyzer_" + (keepstopwords ? "KeepStopwords_" : "") + FIELD_BODY + "_" + similarity.toString();
    PrintWriter out = new PrintWriter(Files.newBufferedWriter(Paths.get(submissionFile), StandardCharsets.US_ASCII));
    EnglishAnalyzer ea = keepstopwords ? new EnglishAnalyzer(CharArraySet.EMPTY_SET) : new EnglishAnalyzer();
    QueryParser queryParser = new QueryParser(FIELD_BODY, ea);
    queryParser.setDefaultOperator(QueryParser.Operator.OR);
    for (Map.Entry<Integer, String> entry : topics.entrySet()) {
        int qID = entry.getKey();
        String queryString = entry.getValue();
        Query query = useQueryParser ? queryParser.parse(queryString) : AnalyzerUtils.buildBagOfWordsQuery(FIELD_BODY, ea, queryString);
        // For Web Tracks 2010, 2011, and 2012, an experimental run consists of
        // the top 10,000 documents for each topic query.
        TopDocs rs = searcher.search(query, numHits);
        List<String> queryTokens = AnalyzerUtils.tokenize(ea, queryString);
        RerankerContext context = new RerankerContext(searcher, query, String.valueOf(qID), queryString, queryTokens, FIELD_BODY, null);
        ScoredDocuments docs = cascade.run(ScoredDocuments.fromTopDocs(rs, searcher), context);
        // Run file columns:
        //   1: topic number
        //   2: currently unused; always "Q0"
        //   3: official document identifier of the retrieved document
        //   4: rank at which the document was retrieved
        //   5: score (integer or floating point) that produced the ranking
        //   6: the "run tag", a unique identifier for the run
        for (int i = 0; i < docs.documents.length; i++) {
            out.println(String.format("%d Q0 %s %d %f %s", qID, docs.documents[i].getField(FIELD_ID).stringValue(), (i + 1), docs.scores[i], runTag));
        }
    }
    out.flush();
    out.close();
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) Query(org.apache.lucene.search.Query) ScoredDocuments(io.anserini.rerank.ScoredDocuments) EnglishAnalyzer(org.apache.lucene.analysis.en.EnglishAnalyzer) ScoreDoc(org.apache.lucene.search.ScoreDoc) TopDocs(org.apache.lucene.search.TopDocs) QueryParser(org.apache.lucene.queryparser.classic.QueryParser) Map(java.util.Map) SortedMap(java.util.SortedMap) RerankerContext(io.anserini.rerank.RerankerContext) PrintWriter(java.io.PrintWriter)
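
The six run-file columns described in the comments map directly onto the format string. A tiny, self-contained sketch with made-up values shows the exact line layout:

public class TrecRunLineSketch {
    public static void main(String[] args) {
        // All values below are hypothetical; they only illustrate the column layout.
        int qID = 301;
        String docid = "clueweb09-en0000-00-00000";
        int rank = 1;
        float score = 12.34f;
        String runTag = "BM25_EnglishAnalyzer_body";
        // Prints: 301 Q0 clueweb09-en0000-00-00000 1 12.340000 BM25_EnglishAnalyzer_body
        System.out.println(String.format("%d Q0 %s %d %f %s", qID, docid, rank, score, runTag));
    }
}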

Example 24 with EnglishAnalyzer

Use of org.apache.lucene.analysis.en.EnglishAnalyzer in project xodus by JetBrains.

The class ExodusLuceneTestsBase, method removeStopWord:

protected void removeStopWord(final String stopWord) {
    // Rebuild the stop set without the given word; Lucene stop word sets
    // iterate as char[] entries, hence the conversion before comparing.
    final HashSet<Object> stopSet = new HashSet<>();
    for (Object word : ((StopwordAnalyzerBase) analyzer).getStopwordSet()) {
        if (!stopWord.equals(new String((char[]) word))) {
            stopSet.add(word);
        }
    }
    analyzer = new EnglishAnalyzer(LUCENE_VERSION, stopSet);
}
Also used : StopwordAnalyzerBase(org.apache.lucene.analysis.StopwordAnalyzerBase) EnglishAnalyzer(org.apache.lucene.analysis.en.EnglishAnalyzer) HashSet(jetbrains.exodus.core.dataStructures.hash.HashSet)
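
The Version-taking constructor above is from pre-5.0 Lucene. On a current Lucene release the same "drop one stop word" idea might look like the following sketch; the default English stop set is assumed, and the word being dropped ("the") is arbitrary:

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.en.EnglishAnalyzer;

public class StopWordRemovalSketch {
    public static void main(String[] args) {
        // CharArraySet also iterates as char[] entries and does not support removal,
        // so build a fresh set while skipping the unwanted word.
        CharArraySet stopSet = new CharArraySet(EnglishAnalyzer.getDefaultStopSet().size(), true);
        for (Object word : EnglishAnalyzer.getDefaultStopSet()) {
            String w = new String((char[]) word);
            if (!"the".equals(w)) {
                stopSet.add(w);
            }
        }
        EnglishAnalyzer analyzer = new EnglishAnalyzer(stopSet);
        System.out.println("stop words kept: " + stopSet.size());
        analyzer.close();
    }
}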

Example 25 with EnglishAnalyzer

Use of org.apache.lucene.analysis.en.EnglishAnalyzer in project Anserini by castorini.

The class IndexerTestBase, method buildTestIndex:

// A very simple example of how to build an index.
private void buildTestIndex() throws IOException {
    Directory dir = FSDirectory.open(tempDir1);
    Analyzer analyzer = new EnglishAnalyzer();
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, config);
    FieldType textOptions = new FieldType();
    textOptions.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    textOptions.setStored(true);
    textOptions.setTokenized(true);
    textOptions.setStoreTermVectors(true);
    textOptions.setStoreTermVectorPositions(true);
    Document doc1 = new Document();
    String doc1Text = "here is some text here is some more text. city.";
    doc1.add(new StringField(IndexArgs.ID, "doc1", Field.Store.YES));
    doc1.add(new SortedDocValuesField(IndexArgs.ID, new BytesRef("doc1".getBytes())));
    doc1.add(new Field(IndexArgs.CONTENTS, doc1Text, textOptions));
    // specifically demonstrate how "contents" and "raw" might diverge:
    doc1.add(new StoredField(IndexArgs.RAW, String.format("{\"contents\": \"%s\"}", doc1Text)));
    writer.addDocument(doc1);
    Document doc2 = new Document();
    // Note the plural "texts", to test stemming.
    String doc2Text = "more texts";
    doc2.add(new StringField(IndexArgs.ID, "doc2", Field.Store.YES));
    doc2.add(new SortedDocValuesField(IndexArgs.ID, new BytesRef("doc2".getBytes())));
    doc2.add(new Field(IndexArgs.CONTENTS, doc2Text, textOptions));
    // specifically demonstrate how "contents" and "raw" might diverge:
    doc2.add(new StoredField(IndexArgs.RAW, String.format("{\"contents\": \"%s\"}", doc2Text)));
    writer.addDocument(doc2);
    Document doc3 = new Document();
    String doc3Text = "here is a test";
    doc3.add(new StringField(IndexArgs.ID, "doc3", Field.Store.YES));
    doc3.add(new SortedDocValuesField(IndexArgs.ID, new BytesRef("doc3".getBytes())));
    doc3.add(new Field(IndexArgs.CONTENTS, doc3Text, textOptions));
    // specifically demonstrate how "contents" and "raw" might diverge:
    doc3.add(new StoredField(IndexArgs.RAW, String.format("{\"contents\": \"%s\"}", doc3Text)));
    writer.addDocument(doc3);
    writer.commit();
    writer.forceMerge(1);
    writer.close();
    dir.close();
}
Also used : EnglishAnalyzer(org.apache.lucene.analysis.en.EnglishAnalyzer) Analyzer(org.apache.lucene.analysis.Analyzer) Document(org.apache.lucene.document.Document) FieldType(org.apache.lucene.document.FieldType) StringField(org.apache.lucene.document.StringField) StoredField(org.apache.lucene.document.StoredField) SortedDocValuesField(org.apache.lucene.document.SortedDocValuesField) Field(org.apache.lucene.document.Field) IndexWriter(org.apache.lucene.index.IndexWriter) BytesRef(org.apache.lucene.util.BytesRef) Directory(org.apache.lucene.store.Directory) FSDirectory(org.apache.lucene.store.FSDirectory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)
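
A possible follow-up check on this index, sketched under the assumption that IndexArgs.CONTENTS resolves to the field name "contents": because EnglishAnalyzer stems at both index and query time, a query for "text" should match both doc1 and the plural "texts" in doc2.

import java.nio.file.Path;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.FSDirectory;

public class StemmingCheckSketch {
    // indexPath would be the tempDir1 used by buildTestIndex() above.
    static void printHits(Path indexPath) throws Exception {
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(indexPath))) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Query query = new QueryParser("contents", new EnglishAnalyzer()).parse("text");
            // Expect 2 hits: "text" in doc1 and the stemmed "texts" in doc2.
            System.out.println("hits: " + searcher.search(query, 10).scoreDocs.length);
        }
    }
}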

Aggregations (types co-occurring with EnglishAnalyzer across all examples, with usage counts)

EnglishAnalyzer (org.apache.lucene.analysis.en.EnglishAnalyzer): 26
Analyzer (org.apache.lucene.analysis.Analyzer): 9
Directory (org.apache.lucene.store.Directory): 9
FSDirectory (org.apache.lucene.store.FSDirectory): 9
Test (org.junit.Test): 6
IndexWriter (org.apache.lucene.index.IndexWriter): 5
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 5
QueryParser (org.apache.lucene.queryparser.classic.QueryParser): 5
ScoredDocuments (io.anserini.rerank.ScoredDocuments): 4
IOException (java.io.IOException): 4
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 4
Query (org.apache.lucene.search.Query): 4
RerankerContext (io.anserini.rerank.RerankerContext): 3
StandardAnalyzer (org.apache.lucene.analysis.standard.StandardAnalyzer): 3
Document (org.apache.lucene.document.Document): 3
Term (org.apache.lucene.index.Term): 3
IndexSearcher (org.apache.lucene.search.IndexSearcher): 3
ScoreDoc (org.apache.lucene.search.ScoreDoc): 3
TopDocs (org.apache.lucene.search.TopDocs): 3
BM25Similarity (org.apache.lucene.search.similarities.BM25Similarity): 3