Example 46 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project lucene-solr by apache.

From the class TestLatLonPointDistanceSort, method testDistanceSort:

/** Add three points and sort by distance */
public void testDistanceSort() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    // add some docs
    Document doc = new Document();
    doc.add(new LatLonDocValuesField("location", 40.759011, -73.9844722));
    iw.addDocument(doc);
    doc = new Document();
    doc.add(new LatLonDocValuesField("location", 40.718266, -74.007819));
    iw.addDocument(doc);
    doc = new Document();
    doc.add(new LatLonDocValuesField("location", 40.7051157, -74.0088305));
    iw.addDocument(doc);
    IndexReader reader = iw.getReader();
    IndexSearcher searcher = newSearcher(reader);
    iw.close();
    Sort sort = new Sort(LatLonDocValuesField.newDistanceSort("location", 40.7143528, -74.0059731));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 3, sort);
    FieldDoc d = (FieldDoc) td.scoreDocs[0];
    assertEquals(462.1028401330431, (Double) d.fields[0], 0.0D);
    d = (FieldDoc) td.scoreDocs[1];
    assertEquals(1054.9842850974826, (Double) d.fields[0], 0.0D);
    d = (FieldDoc) td.scoreDocs[2];
    assertEquals(5285.881528419706, (Double) d.fields[0], 0.0D);
    reader.close();
    dir.close();
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TopDocs (org.apache.lucene.search.TopDocs), FieldDoc (org.apache.lucene.search.FieldDoc), IndexReader (org.apache.lucene.index.IndexReader), Sort (org.apache.lucene.search.Sort), MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Directory (org.apache.lucene.store.Directory)
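
The pattern this test exercises carries over directly to application code: build a Sort from LatLonDocValuesField.newDistanceSort and read the computed distance back out of FieldDoc.fields[0] (a boxed Double, in meters). A minimal sketch under those assumptions; searcher, queryLat and queryLon are illustrative names, not part of the test:

Sort byDistance = new Sort(LatLonDocValuesField.newDistanceSort("location", queryLat, queryLon));
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10, byDistance);
for (ScoreDoc sd : hits.scoreDocs) {
    // For a distance sort, the first sort value is the distance in meters, boxed as a Double.
    double meters = (Double) ((FieldDoc) sd).fields[0];
    System.out.println("doc=" + sd.doc + " distance=" + meters + " m");
}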

Example 47 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project lucene-solr by apache.

From the class TestLatLonPointDistanceSort, method testMissingLast:

/** Add two points (one doc missing) and sort by distance */
public void testMissingLast() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    // missing
    Document doc = new Document();
    iw.addDocument(doc);
    doc = new Document();
    doc.add(new LatLonDocValuesField("location", 40.718266, -74.007819));
    iw.addDocument(doc);
    doc = new Document();
    doc.add(new LatLonDocValuesField("location", 40.7051157, -74.0088305));
    iw.addDocument(doc);
    IndexReader reader = iw.getReader();
    IndexSearcher searcher = newSearcher(reader);
    iw.close();
    Sort sort = new Sort(LatLonDocValuesField.newDistanceSort("location", 40.7143528, -74.0059731));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 3, sort);
    FieldDoc d = (FieldDoc) td.scoreDocs[0];
    assertEquals(462.1028401330431D, (Double) d.fields[0], 0.0D);
    d = (FieldDoc) td.scoreDocs[1];
    assertEquals(1054.9842850974826, (Double) d.fields[0], 0.0D);
    d = (FieldDoc) td.scoreDocs[2];
    assertEquals(Double.POSITIVE_INFINITY, (Double) d.fields[0], 0.0D);
    reader.close();
    dir.close();
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TopDocs (org.apache.lucene.search.TopDocs), FieldDoc (org.apache.lucene.search.FieldDoc), IndexReader (org.apache.lucene.index.IndexReader), Sort (org.apache.lucene.search.Sort), MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Directory (org.apache.lucene.store.Directory)
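
As the assertion on td.scoreDocs[2] shows, a document with no "location" doc values sorts last and reports Double.POSITIVE_INFINITY as its sort value. Application code may want to treat that sentinel as "no distance available"; a small hypothetical helper (not part of the test, uses java.util.OptionalDouble):

// Hypothetical helper: map the +Infinity sentinel reported for documents
// missing the "location" doc values to an empty OptionalDouble.
static OptionalDouble distanceOrEmpty(FieldDoc hit) {
    double meters = (Double) hit.fields[0];
    return Double.isInfinite(meters) ? OptionalDouble.empty() : OptionalDouble.of(meters);
}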

Example 48 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project lucene-solr by apache.

From the class TestNumericDocValuesUpdates, method testBiasedMixOfRandomUpdates:

public void testBiasedMixOfRandomUpdates() throws Exception {
    // 3 types of operations: add, update, updateDV.
    // rather than randomizing equally, we pick (random) cutoffs so each test run is biased,
    // with some ops happening more often than others
    final int ADD_CUTOFF = TestUtil.nextInt(random(), 1, 98);
    final int UPD_CUTOFF = TestUtil.nextInt(random(), ADD_CUTOFF + 1, 99);
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    IndexWriter writer = new IndexWriter(dir, conf);
    final int numOperations = atLeast(1000);
    final Map<Integer, Long> expected = new HashMap<>(numOperations / 3);
    // start with at least one doc before any chance of updates
    final int numSeedDocs = atLeast(1);
    for (int i = 0; i < numSeedDocs; i++) {
        final long val = random().nextLong();
        expected.put(i, val);
        writer.addDocument(doc(i, val));
    }
    int numDocUpdates = 0;
    int numValueUpdates = 0;
    for (int i = 0; i < numOperations; i++) {
        final int op = TestUtil.nextInt(random(), 1, 100);
        final long val = random().nextLong();
        if (op <= ADD_CUTOFF) {
            final int id = expected.size();
            //System.out.println("TEST i=" + i + ": addDocument id=" + id + " val=" + val);
            expected.put(id, val);
            writer.addDocument(doc(id, val));
        } else {
            final int id = TestUtil.nextInt(random(), 0, expected.size() - 1);
            expected.put(id, val);
            if (op <= UPD_CUTOFF) {
                numDocUpdates++;
                //System.out.println("TEST i=" + i + ": updateDocument id=" + id + " val=" + val);
                writer.updateDocument(new Term("id", "doc-" + id), doc(id, val));
            } else {
                numValueUpdates++;
                //System.out.println("TEST i=" + i + ": updateDV id=" + id + " val=" + val);
                writer.updateNumericDocValue(new Term("id", "doc-" + id), "val", val);
            }
        }
    }
    writer.commit();
    final DirectoryReader reader = DirectoryReader.open(dir);
    final IndexSearcher searcher = new IndexSearcher(reader);
    // TODO: make more efficient if max numOperations is going to be increased much
    for (Map.Entry<Integer, Long> expect : expected.entrySet()) {
        String id = "doc-" + expect.getKey();
        TopFieldDocs td = searcher.search(new TermQuery(new Term("id", id)), 1, new Sort(new SortField("val", SortField.Type.LONG)));
        assertEquals(id + " missing?", 1, td.totalHits);
        assertEquals(id + " value", expect.getValue(), ((FieldDoc) td.scoreDocs[0]).fields[0]);
    }
    IOUtils.close(reader, writer, dir);
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TermQuery (org.apache.lucene.search.TermQuery), FieldDoc (org.apache.lucene.search.FieldDoc), HashMap (java.util.HashMap), TopFieldDocs (org.apache.lucene.search.TopFieldDocs), SortField (org.apache.lucene.search.SortField), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), Sort (org.apache.lucene.search.Sort), Map (java.util.Map), Directory (org.apache.lucene.store.Directory), NRTCachingDirectory (org.apache.lucene.store.NRTCachingDirectory)
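
The doc(id, val) helper is defined elsewhere in TestNumericDocValuesUpdates and is not shown in this excerpt. Given that the test looks documents up with new Term("id", "doc-" + id) and sorts on "val" as a LONG, a plausible sketch of the helper looks like the following (field options in the real class may differ):

// Plausible shape of the doc(...) helper used above; an assumption, not the verbatim source.
private Document doc(int id, long val) {
    Document d = new Document();
    d.add(new StringField("id", "doc-" + id, Field.Store.NO));   // matched by new Term("id", "doc-" + id)
    d.add(new NumericDocValuesField("val", val));                // updated in place via updateNumericDocValue, sorted as LONG
    return d;
}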

Example 49 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project lucene-solr by apache.

From the class TestNumericDocValuesUpdates, method testMultipleUpdatesSameDoc:

public void testMultipleUpdatesSameDoc() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    // small number of docs, so use a tiny maxBufferedDocs
    conf.setMaxBufferedDocs(3);
    IndexWriter writer = new IndexWriter(dir, conf);
    writer.updateDocument(new Term("id", "doc-1"), doc(1, 1000000000L));
    writer.updateNumericDocValue(new Term("id", "doc-1"), "val", 1000001111L);
    writer.updateDocument(new Term("id", "doc-2"), doc(2, 2000000000L));
    writer.updateDocument(new Term("id", "doc-2"), doc(2, 2222222222L));
    writer.updateNumericDocValue(new Term("id", "doc-1"), "val", 1111111111L);
    writer.commit();
    final DirectoryReader reader = DirectoryReader.open(dir);
    final IndexSearcher searcher = new IndexSearcher(reader);
    TopFieldDocs td;
    td = searcher.search(new TermQuery(new Term("id", "doc-1")), 1, new Sort(new SortField("val", SortField.Type.LONG)));
    assertEquals("doc-1 missing?", 1, td.scoreDocs.length);
    assertEquals("doc-1 value", 1111111111L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    td = searcher.search(new TermQuery(new Term("id", "doc-2")), 1, new Sort(new SortField("val", SortField.Type.LONG)));
    assertEquals("doc-2 missing?", 1, td.scoreDocs.length);
    assertEquals("doc-2 value", 2222222222L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    IOUtils.close(reader, writer, dir);
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TermQuery (org.apache.lucene.search.TermQuery), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), FieldDoc (org.apache.lucene.search.FieldDoc), TopFieldDocs (org.apache.lucene.search.TopFieldDocs), Sort (org.apache.lucene.search.Sort), SortField (org.apache.lucene.search.SortField), Directory (org.apache.lucene.store.Directory), NRTCachingDirectory (org.apache.lucene.store.NRTCachingDirectory)
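
The assertions above read the updated value back indirectly, through a sort on "val". The same value can also be read straight from doc values; a minimal sketch, assuming the iterator-based doc values API (Lucene 7+) and that docId holds the Lucene-internal id of doc-1:

// Read the current "val" for a known Lucene doc id directly from doc values.
NumericDocValues vals = MultiDocValues.getNumericValues(reader, "val");
if (vals != null && vals.advanceExact(docId)) {
    long current = vals.longValue();   // 1111111111L for doc-1 after the updates above
}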

Example 50 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project lucene-solr by apache.

From the class TopGroupsResultTransformer, method serializeTopGroups:

protected NamedList serializeTopGroups(TopGroups<BytesRef> data, SchemaField groupField) throws IOException {
    NamedList<Object> result = new NamedList<>();
    result.add("totalGroupedHitCount", data.totalGroupedHitCount);
    result.add("totalHitCount", data.totalHitCount);
    if (data.totalGroupCount != null) {
        result.add("totalGroupCount", data.totalGroupCount);
    }
    final IndexSchema schema = rb.req.getSearcher().getSchema();
    SchemaField uniqueField = schema.getUniqueKeyField();
    for (GroupDocs<BytesRef> searchGroup : data.groups) {
        NamedList<Object> groupResult = new NamedList<>();
        groupResult.add("totalHits", searchGroup.totalHits);
        if (!Float.isNaN(searchGroup.maxScore)) {
            groupResult.add("maxScore", searchGroup.maxScore);
        }
        List<NamedList<Object>> documents = new ArrayList<>();
        for (int i = 0; i < searchGroup.scoreDocs.length; i++) {
            NamedList<Object> document = new NamedList<>();
            documents.add(document);
            Document doc = retrieveDocument(uniqueField, searchGroup.scoreDocs[i].doc);
            document.add(ID, uniqueField.getType().toExternal(doc.getField(uniqueField.getName())));
            if (!Float.isNaN(searchGroup.scoreDocs[i].score)) {
                document.add("score", searchGroup.scoreDocs[i].score);
            }
            if (!(searchGroup.scoreDocs[i] instanceof FieldDoc)) {
                // thus don't add sortValues below
                continue;
            }
            FieldDoc fieldDoc = (FieldDoc) searchGroup.scoreDocs[i];
            Object[] convertedSortValues = new Object[fieldDoc.fields.length];
            for (int j = 0; j < fieldDoc.fields.length; j++) {
                Object sortValue = fieldDoc.fields[j];
                Sort withinGroupSort = rb.getGroupingSpec().getSortWithinGroup();
                SchemaField field = withinGroupSort.getSort()[j].getField() != null ? schema.getFieldOrNull(withinGroupSort.getSort()[j].getField()) : null;
                if (field != null) {
                    FieldType fieldType = field.getType();
                    if (sortValue != null) {
                        sortValue = fieldType.marshalSortValue(sortValue);
                    }
                }
                convertedSortValues[j] = sortValue;
            }
            document.add("sortValues", convertedSortValues);
        }
        groupResult.add("documents", documents);
        String groupValue = searchGroup.groupValue != null ? groupField.getType().indexedToReadable(searchGroup.groupValue, new CharsRefBuilder()).toString() : null;
        result.add(groupValue, groupResult);
    }
    return result;
}
Also used: FieldDoc (org.apache.lucene.search.FieldDoc), NamedList (org.apache.solr.common.util.NamedList), ArrayList (java.util.ArrayList), Document (org.apache.lucene.document.Document), FieldType (org.apache.solr.schema.FieldType), SchemaField (org.apache.solr.schema.SchemaField), Sort (org.apache.lucene.search.Sort), IndexSchema (org.apache.solr.schema.IndexSchema), CharsRefBuilder (org.apache.lucene.util.CharsRefBuilder), BytesRef (org.apache.lucene.util.BytesRef)
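
retrieveDocument is a private helper of the transformer and is not included in this excerpt. Conceptually it loads the stored document for a Lucene doc id, restricted to the unique-key field so that only the ID needs to be fetched. A rough sketch of that idea (the real implementation in TopGroupsResultTransformer may differ in detail):

// Rough sketch, not the verbatim Solr source: fetch only the unique-key stored field
// for the given Lucene doc id via the request's searcher.
private Document retrieveDocument(final SchemaField uniqueField, int docId) throws IOException {
    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(uniqueField.getName());
    rb.req.getSearcher().doc(docId, visitor);
    return visitor.getDocument();
}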

Aggregations

FieldDoc (org.apache.lucene.search.FieldDoc): 50 usages
Sort (org.apache.lucene.search.Sort): 26 usages
TopFieldDocs (org.apache.lucene.search.TopFieldDocs): 23 usages
SortField (org.apache.lucene.search.SortField): 21 usages
ScoreDoc (org.apache.lucene.search.ScoreDoc): 20 usages
MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery): 18 usages
IndexSearcher (org.apache.lucene.search.IndexSearcher): 17 usages
ArrayList (java.util.ArrayList): 14 usages
Directory (org.apache.lucene.store.Directory): 14 usages
TopDocs (org.apache.lucene.search.TopDocs): 12 usages
RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter): 10 usages
TermQuery (org.apache.lucene.search.TermQuery): 10 usages
Document (org.apache.lucene.document.Document): 9 usages
Term (org.apache.lucene.index.Term): 9 usages
BytesRef (org.apache.lucene.util.BytesRef): 9 usages
HashMap (java.util.HashMap): 8 usages
Query (org.apache.lucene.search.Query): 8 usages
IndexReader (org.apache.lucene.index.IndexReader): 7 usages
HashSet (java.util.HashSet): 5 usages
DirectoryReader (org.apache.lucene.index.DirectoryReader): 5 usages