Example 86 with RandomIndexWriter

Use of org.apache.lucene.index.RandomIndexWriter in the lucene-solr project by Apache.

From class TestContextQuery, method testRandomContextQueryScoring: the test indexes randomly weighted suggestions under several contexts and verifies that ContextQuery returns the top suggestions scored by suggestion weight times the per-context boost.

@Test
public void testRandomContextQueryScoring() throws Exception {
    Analyzer analyzer = new MockAnalyzer(random());
    try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) {
        int numSuggestions = atLeast(20);
        int numContexts = atLeast(5);
        Set<Integer> seenWeights = new HashSet<>();
        List<Entry> expectedEntries = new ArrayList<>();
        List<CharSequence> contexts = new ArrayList<>();
        for (int i = 1; i <= numContexts; i++) {
            CharSequence context = TestUtil.randomSimpleString(random(), 10) + i;
            contexts.add(context);
            for (int j = 1; j <= numSuggestions; j++) {
                String suggestion = "sugg_" + TestUtil.randomSimpleString(random(), 10) + j;
                int weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
                while (seenWeights.contains(weight)) {
                    weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
                }
                seenWeights.add(weight);
                Document document = new Document();
                document.add(new ContextSuggestField("suggest_field", suggestion, weight, context));
                iw.addDocument(document);
                expectedEntries.add(new Entry(suggestion, context.toString(), i * weight));
            }
            if (rarely()) {
                iw.commit();
            }
        }
        Entry[] expectedResults = expectedEntries.toArray(new Entry[expectedEntries.size()]);
        ArrayUtil.introSort(expectedResults, new Comparator<Entry>() {

            @Override
            public int compare(Entry o1, Entry o2) {
                int cmp = Float.compare(o2.value, o1.value);
                if (cmp != 0) {
                    return cmp;
                } else {
                    return o1.output.compareTo(o2.output);
                }
            }
        });
        try (DirectoryReader reader = iw.getReader()) {
            SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
            ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
            for (int i = 0; i < contexts.size(); i++) {
                query.addContext(contexts.get(i), i + 1);
            }
            TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false);
            assertSuggestions(suggest, Arrays.copyOfRange(expectedResults, 0, 4));
        }
    }
}
Also used: DirectoryReader (org.apache.lucene.index.DirectoryReader), ArrayList (java.util.ArrayList), Term (org.apache.lucene.index.Term), Analyzer (org.apache.lucene.analysis.Analyzer), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), Document (org.apache.lucene.document.Document), Entry (org.apache.lucene.search.suggest.document.TestSuggestField.Entry), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), HashSet (java.util.HashSet), Test (org.junit.Test)
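
A note on the expected scores above (an inference from the test code, not text from the original page): ContextQuery scores a suggestion as its indexed weight multiplied by the boost registered for its context, which is why the expected entries are built with i * weight when context number i is added with boost i. A minimal sketch of that relationship, reusing the analyzer from the test; the context names are illustrative only:

// Sketch only: "analyzer" is the MockAnalyzer from the test above, and the
// boosts mirror the loop that calls query.addContext(contexts.get(i), i + 1).
ContextQuery query = new ContextQuery(
        new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
// a suggestion indexed with weight w under "ctx1" is expected to score w * 1
query.addContext("ctx1", 1);
// a suggestion indexed with weight w under "ctx2" is expected to score w * 2
query.addContext("ctx2", 2);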

Example 87 with RandomIndexWriter

Use of org.apache.lucene.index.RandomIndexWriter in the lucene-solr project by Apache.

From class TestContextQuery, method testEmptyContextWithBoosts: the test mixes suggestions indexed without any context and one suggestion with a boosted context, and verifies that addAllContexts() still returns the context-free suggestions at their original weights.

@Test
public void testEmptyContextWithBoosts() throws Exception {
    Analyzer analyzer = new MockAnalyzer(random());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
    Document document = new Document();
    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4));
    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3));
    document.add(new ContextSuggestField("suggest_field", "suggestion3", 2));
    iw.addDocument(document);
    document = new Document();
    document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
    iw.addDocument(document);
    if (rarely()) {
        iw.commit();
    }
    DirectoryReader reader = iw.getReader();
    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
    query.addContext("type4", 10);
    query.addAllContexts();
    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
    assertSuggestions(suggest, new Entry("suggestion4", "type4", 1 * 10), new Entry("suggestion1", null, 4), new Entry("suggestion2", null, 3), new Entry("suggestion3", null, 2));
    reader.close();
    iw.close();
}
Also used: Entry (org.apache.lucene.search.suggest.document.TestSuggestField.Entry), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), DirectoryReader (org.apache.lucene.index.DirectoryReader), Term (org.apache.lucene.index.Term), Analyzer (org.apache.lucene.analysis.Analyzer), Document (org.apache.lucene.document.Document), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Test (org.junit.Test)
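
Unlike Example 86, this test closes the reader and writer manually, so a failed assertion leaves them open until the test framework cleans up. Below is a minimal sketch of the same lifecycle written with try-with-resources; it is an alternative form, not code from the original test, and it assumes dir and iwcWithSuggestField are the same fixtures used above:

Analyzer analyzer = new MockAnalyzer(random());
try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
        iwcWithSuggestField(analyzer, "suggest_field"))) {
    Document document = new Document();
    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4));
    iw.addDocument(document);
    try (DirectoryReader reader = iw.getReader()) {
        SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
        ContextQuery query = new ContextQuery(
                new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
        query.addAllContexts();
        TopSuggestDocs suggest = searcher.suggest(query, 5, false);
        // assertions go here; reader and writer are closed even if they throw
    }
}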

Example 88 with RandomIndexWriter

Use of org.apache.lucene.index.RandomIndexWriter in the lucene-solr project by Apache.

From class BaseGeoPointTestCase, method testMultiValued: the test indexes two random points per document and checks random rectangle queries against a brute-force containment test on both points.

public void testMultiValued() throws Exception {
    int numPoints = atLeast(10000);
    // Every doc has 2 points:
    double[] lats = new double[2 * numPoints];
    double[] lons = new double[2 * numPoints];
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig();
    // We rely on docID order:
    iwc.setMergePolicy(newLogMergePolicy());
    // and on seeds being able to reproduce:
    iwc.setMergeScheduler(new SerialMergeScheduler());
    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
    for (int id = 0; id < numPoints; id++) {
        Document doc = new Document();
        lats[2 * id] = quantizeLat(nextLatitude());
        lons[2 * id] = quantizeLon(nextLongitude());
        doc.add(newStringField("id", "" + id, Field.Store.YES));
        addPointToDoc(FIELD_NAME, doc, lats[2 * id], lons[2 * id]);
        lats[2 * id + 1] = quantizeLat(nextLatitude());
        lons[2 * id + 1] = quantizeLon(nextLongitude());
        addPointToDoc(FIELD_NAME, doc, lats[2 * id + 1], lons[2 * id + 1]);
        if (VERBOSE) {
            System.out.println("id=" + id);
            System.out.println("  lat=" + lats[2 * id] + " lon=" + lons[2 * id]);
            System.out.println("  lat=" + lats[2 * id + 1] + " lon=" + lons[2 * id + 1]);
        }
        w.addDocument(doc);
    }
    // TODO: share w/ verify; just need parallel array of the expected ids
    if (random().nextBoolean()) {
        w.forceMerge(1);
    }
    IndexReader r = w.getReader();
    w.close();
    IndexSearcher s = newSearcher(r);
    int iters = atLeast(25);
    for (int iter = 0; iter < iters; iter++) {
        Rectangle rect = nextBox();
        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " rect=" + rect);
        }
        Query query = newRectQuery(FIELD_NAME, rect.minLat, rect.maxLat, rect.minLon, rect.maxLon);
        final FixedBitSet hits = new FixedBitSet(r.maxDoc());
        s.search(query, new SimpleCollector() {

            private int docBase;

            @Override
            public boolean needsScores() {
                return false;
            }

            @Override
            protected void doSetNextReader(LeafReaderContext context) throws IOException {
                docBase = context.docBase;
            }

            @Override
            public void collect(int doc) {
                // docBase maps the per-segment doc ID to a top-level doc ID
                hits.set(docBase + doc);
            }
        });
        boolean fail = false;
        for (int docID = 0; docID < lats.length / 2; docID++) {
            double latDoc1 = lats[2 * docID];
            double lonDoc1 = lons[2 * docID];
            double latDoc2 = lats[2 * docID + 1];
            double lonDoc2 = lons[2 * docID + 1];
            boolean result1 = rectContainsPoint(rect, latDoc1, lonDoc1);
            boolean result2 = rectContainsPoint(rect, latDoc2, lonDoc2);
            boolean expected = result1 || result2;
            if (hits.get(docID) != expected) {
                String id = s.doc(docID).get("id");
                if (expected) {
                    System.out.println("TEST: id=" + id + " docID=" + docID + " should match but did not");
                } else {
                    System.out.println("TEST: id=" + id + " docID=" + docID + " should not match but did");
                }
                System.out.println("  rect=" + rect);
                System.out.println("  lat=" + latDoc1 + " lon=" + lonDoc1 + "\n  lat=" + latDoc2 + " lon=" + lonDoc2);
                System.out.println("  result1=" + result1 + " result2=" + result2);
                fail = true;
            }
        }
        if (fail) {
            fail("some hits were wrong");
        }
    }
    r.close();
    dir.close();
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), Query (org.apache.lucene.search.Query), MatchNoDocsQuery (org.apache.lucene.search.MatchNoDocsQuery), IOException (java.io.IOException), Document (org.apache.lucene.document.Document), SerialMergeScheduler (org.apache.lucene.index.SerialMergeScheduler), SimpleCollector (org.apache.lucene.search.SimpleCollector), FixedBitSet (org.apache.lucene.util.FixedBitSet), IndexReader (org.apache.lucene.index.IndexReader), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Directory (org.apache.lucene.store.Directory), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
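
This test relies on docIDs matching the order of the lats/lons arrays, which is why it sets a log merge policy and a serial merge scheduler and never deletes documents. The stored id field gives an order-independent way to map a hit back to its expected coordinates; a short sketch of that lookup, inferred from the failure-reporting code above rather than taken from the original test:

// "s", "lats" and "lons" are the searcher and arrays from the test above;
// docID is a top-level doc ID taken from the collected FixedBitSet.
int slot = Integer.parseInt(s.doc(docID).get("id"));
double expectedLat1 = lats[2 * slot];
double expectedLon1 = lons[2 * slot];
double expectedLat2 = lats[2 * slot + 1];
double expectedLon2 = lons[2 * slot + 1];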

Example 89 with RandomIndexWriter

Use of org.apache.lucene.index.RandomIndexWriter in the lucene-solr project by Apache.

From class BaseGeoPointTestCase, method searchSmallSet: a helper that indexes a small fixed set of points (single- and multi-valued) and returns the TopDocs for a given query.

/** Returns TopDocs over a small set of points in the field "point". */
private TopDocs searchSmallSet(Query query, int size) throws Exception {
    // this is a simple systematic test, indexing these points
    // TODO: fragile: does not understand quantization in any way yet uses extremely high precision!
    double[][] pts = new double[][] {
        { 32.763420, -96.774 },
        { 32.7559529921407, -96.7759895324707 },
        { 32.77866942010977, -96.77701950073242 },
        { 32.7756745755423, -96.7706036567688 },
        { 27.703618681345585, -139.73458170890808 },
        { 32.94823588839368, -96.4538113027811 },
        { 33.06047141970814, -96.65084838867188 },
        { 32.778650, -96.7772 },
        { -88.56029371730983, -177.23537676036358 },
        { 33.541429799076354, -26.779373834241003 },
        { 26.774024500421728, -77.35379276106497 },
        { -90.0, -14.796283808944777 },
        { 32.94823588839368, -178.8538113027811 },
        { 32.94823588839368, 178.8538113027811 },
        { 40.720611, -73.998776 },
        { -44.5, -179.5 }
    };
    Directory directory = newDirectory();
    // TODO: must these simple tests really rely on docid order?
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    iwc.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000));
    iwc.setMergePolicy(newLogMergePolicy());
    // Else seeds may not reproduce:
    iwc.setMergeScheduler(new SerialMergeScheduler());
    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, iwc);
    for (double[] p : pts) {
        Document doc = new Document();
        addPointToDoc("point", doc, p[0], p[1]);
        writer.addDocument(doc);
    }
    // add explicit multi-valued docs
    for (int i = 0; i < pts.length; i += 2) {
        Document doc = new Document();
        addPointToDoc("point", doc, pts[i][0], pts[i][1]);
        addPointToDoc("point", doc, pts[i + 1][0], pts[i + 1][1]);
        writer.addDocument(doc);
    }
    // index random string documents
    for (int i = 0; i < random().nextInt(10); ++i) {
        Document doc = new Document();
        doc.add(new StringField("string", Integer.toString(i), Field.Store.NO));
        writer.addDocument(doc);
    }
    IndexReader reader = writer.getReader();
    writer.close();
    IndexSearcher searcher = newSearcher(reader);
    TopDocs topDocs = searcher.search(query, size);
    reader.close();
    directory.close();
    return topDocs;
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), Document (org.apache.lucene.document.Document), SerialMergeScheduler (org.apache.lucene.index.SerialMergeScheduler), TopDocs (org.apache.lucene.search.TopDocs), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), StringField (org.apache.lucene.document.StringField), IndexReader (org.apache.lucene.index.IndexReader), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Directory (org.apache.lucene.store.Directory), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
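
A hypothetical caller, not shown on the original page: concrete subclasses of BaseGeoPointTestCase build a query with their newRectQuery factory and pass it to this helper, then check the returned TopDocs, roughly like this. The rectangle bounds below are illustrative only:

// The indexed points cluster around Dallas, TX (roughly lat 32.7..33.1,
// lon -96.8..-96.4), so a box over that area should match several of them.
TopDocs td = searchSmallSet(newRectQuery("point", 32.7, 33.1, -96.8, -96.4), 20);
// td.scoreDocs can then be compared against the expected number of matching points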

Example 90 with RandomIndexWriter

Use of org.apache.lucene.index.RandomIndexWriter in the lucene-solr project by Apache.

From class BaseGeoPointTestCase, method verifyRandomRectangles: this method indexes the given points (with random deletions along the way), then checks random rectangle queries against a brute-force per-document containment check.

protected void verifyRandomRectangles(double[] lats, double[] lons) throws Exception {
    IndexWriterConfig iwc = newIndexWriterConfig();
    // Else seeds may not reproduce:
    iwc.setMergeScheduler(new SerialMergeScheduler());
    // Else we can get O(N^2) merging:
    int mbd = iwc.getMaxBufferedDocs();
    if (mbd != -1 && mbd < lats.length / 100) {
        iwc.setMaxBufferedDocs(lats.length / 100);
    }
    Directory dir;
    if (lats.length > 100000) {
        dir = newFSDirectory(createTempDir(getClass().getSimpleName()));
    } else {
        dir = newDirectory();
    }
    Set<Integer> deleted = new HashSet<>();
    // RandomIndexWriter is too slow here:
    IndexWriter w = new IndexWriter(dir, iwc);
    for (int id = 0; id < lats.length; id++) {
        Document doc = new Document();
        doc.add(newStringField("id", "" + id, Field.Store.NO));
        doc.add(new NumericDocValuesField("id", id));
        if (Double.isNaN(lats[id]) == false) {
            addPointToDoc(FIELD_NAME, doc, lats[id], lons[id]);
        }
        w.addDocument(doc);
        if (id > 0 && random().nextInt(100) == 42) {
            int idToDelete = random().nextInt(id);
            w.deleteDocuments(new Term("id", "" + idToDelete));
            deleted.add(idToDelete);
            if (VERBOSE) {
                System.out.println("  delete id=" + idToDelete);
            }
        }
    }
    if (random().nextBoolean()) {
        w.forceMerge(1);
    }
    final IndexReader r = DirectoryReader.open(w);
    w.close();
    IndexSearcher s = newSearcher(r);
    int iters = atLeast(25);
    Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
    int maxDoc = s.getIndexReader().maxDoc();
    for (int iter = 0; iter < iters; iter++) {
        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " s=" + s);
        }
        Rectangle rect = nextBox();
        Query query = newRectQuery(FIELD_NAME, rect.minLat, rect.maxLat, rect.minLon, rect.maxLon);
        if (VERBOSE) {
            System.out.println("  query=" + query);
        }
        final FixedBitSet hits = new FixedBitSet(maxDoc);
        s.search(query, new SimpleCollector() {

            private int docBase;

            @Override
            public boolean needsScores() {
                return false;
            }

            @Override
            protected void doSetNextReader(LeafReaderContext context) throws IOException {
                docBase = context.docBase;
            }

            @Override
            public void collect(int doc) {
                hits.set(docBase + doc);
            }
        });
        boolean fail = false;
        NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
        for (int docID = 0; docID < maxDoc; docID++) {
            assertEquals(docID, docIDToID.nextDoc());
            int id = (int) docIDToID.longValue();
            boolean expected;
            if (liveDocs != null && liveDocs.get(docID) == false) {
                // document is deleted
                expected = false;
            } else if (Double.isNaN(lats[id])) {
                expected = false;
            } else {
                expected = rectContainsPoint(rect, lats[id], lons[id]);
            }
            if (hits.get(docID) != expected) {
                StringBuilder b = new StringBuilder();
                b.append("docID=(" + docID + ")\n");
                if (expected) {
                    b.append("FAIL: id=" + id + " should match but did not\n");
                } else {
                    b.append("FAIL: id=" + id + " should not match but did\n");
                }
                b.append("  box=" + rect + "\n");
                b.append("  query=" + query + " docID=" + docID + "\n");
                b.append("  lat=" + lats[id] + " lon=" + lons[id] + "\n");
                b.append("  deleted?=" + (liveDocs != null && liveDocs.get(docID) == false));
                if (true) {
                    // fail fast on the first wrong hit; flip to false to log all failures and continue
                    fail("wrong hit (first of possibly more):\n\n" + b);
                } else {
                    System.out.println(b.toString());
                    fail = true;
                }
            }
        }
        if (fail) {
            fail("some hits were wrong");
        }
    }
    IOUtils.close(r, dir);
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), NumericDocValues (org.apache.lucene.index.NumericDocValues), Query (org.apache.lucene.search.Query), MatchNoDocsQuery (org.apache.lucene.search.MatchNoDocsQuery), Term (org.apache.lucene.index.Term), IOException (java.io.IOException), Document (org.apache.lucene.document.Document), SerialMergeScheduler (org.apache.lucene.index.SerialMergeScheduler), SimpleCollector (org.apache.lucene.search.SimpleCollector), NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField), IndexWriter (org.apache.lucene.index.IndexWriter), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), FixedBitSet (org.apache.lucene.util.FixedBitSet), IndexReader (org.apache.lucene.index.IndexReader), Bits (org.apache.lucene.util.Bits), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig), Directory (org.apache.lucene.store.Directory), HashSet (java.util.HashSet)
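
One detail worth calling out in this example: the verification loop reads the id doc values with nextDoc() followed by longValue(), because from Lucene 7 onward doc values are exposed as iterators rather than random-access arrays. A generic read loop over such an iterator looks roughly like this (a sketch, not code from the original test):

// "r" is the IndexReader from the test above; NO_MORE_DOCS comes from
// org.apache.lucene.search.DocIdSetIterator.
NumericDocValues values = MultiDocValues.getNumericValues(r, "id");
for (int doc = values.nextDoc();
        doc != DocIdSetIterator.NO_MORE_DOCS;
        doc = values.nextDoc()) {
    long value = values.longValue();  // value for document "doc"
    // ... use value ...
}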

Aggregations

RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter): 779 usages
Document (org.apache.lucene.document.Document): 679
Directory (org.apache.lucene.store.Directory): 588
IndexReader (org.apache.lucene.index.IndexReader): 510
Term (org.apache.lucene.index.Term): 325
IndexSearcher (org.apache.lucene.search.IndexSearcher): 294
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 220
BytesRef (org.apache.lucene.util.BytesRef): 142
Field (org.apache.lucene.document.Field): 141
MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery): 136
TopDocs (org.apache.lucene.search.TopDocs): 134
TermQuery (org.apache.lucene.search.TermQuery): 121
DirectoryReader (org.apache.lucene.index.DirectoryReader): 120
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 110
ArrayList (java.util.ArrayList): 95
StringField (org.apache.lucene.document.StringField): 93
Analyzer (org.apache.lucene.analysis.Analyzer): 88
BooleanQuery (org.apache.lucene.search.BooleanQuery): 88
NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField): 77
Test (org.junit.Test): 75
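
The aggregation above reflects the pattern nearly all of these examples share: open a Directory, index with RandomIndexWriter, obtain a reader, search, and close everything. A minimal self-contained sketch of that skeleton, assuming it runs inside a LuceneTestCase subclass (random(), newDirectory(), newStringField() and newSearcher() come from the test framework):

Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(newStringField("id", "1", Field.Store.NO));
w.addDocument(doc);
IndexReader reader = w.getReader();   // flushes pending docs and opens a near-real-time reader
w.close();
IndexSearcher searcher = newSearcher(reader);
TopDocs hits = searcher.search(new TermQuery(new Term("id", "1")), 10);
// ... assertions on hits ...
reader.close();
dir.close();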