
Example 81 with LeafReaderContext

Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.

From the class DeleteByQueryWrapper, the method createWeight:

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
    final LeafReader wrapped = wrap((LeafReader) searcher.getIndexReader());
    final IndexSearcher privateContext = new IndexSearcher(wrapped);
    privateContext.setQueryCache(searcher.getQueryCache());
    final Weight inner = in.createWeight(privateContext, needsScores, boost);
    return new Weight(DeleteByQueryWrapper.this) {

        @Override
        public void extractTerms(Set<Term> terms) {
            throw new UnsupportedOperationException();
        }

        @Override
        public Explanation explain(LeafReaderContext context, int doc) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            return inner.scorer(privateContext.getIndexReader().leaves().get(0));
        }
    };
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), Set (java.util.Set), LeafReader (org.apache.lucene.index.LeafReader), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), Weight (org.apache.lucene.search.Weight)
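
A minimal sketch, not taken from the example above, of the standard per-leaf scoring loop that this wrapper short-circuits by searching a single wrapped leaf. The class and method names here are hypothetical; the createWeight signature matches the one used in these examples, and a Weight produces one Scorer per LeafReaderContext with leaf-relative docIDs.

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

public class PerLeafScoringSketch {

    static void scoreAllLeaves(IndexSearcher searcher, Query query) throws IOException {
        Weight weight = query.createWeight(searcher, false, 1.0f);
        for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
            // One Scorer per segment; null means the query matches nothing in this leaf.
            Scorer scorer = weight.scorer(leaf);
            if (scorer == null) {
                continue;
            }
            DocIdSetIterator it = scorer.iterator();
            for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                // Leaf-relative docID plus docBase gives the index-wide docID.
                System.out.println("hit docID=" + (leaf.docBase + doc));
            }
        }
    }
}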

Example 82 with LeafReaderContext

Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.

From the class DirectUpdateHandler2, the method mergeIndexes:

@Override
public int mergeIndexes(MergeIndexesCommand cmd) throws IOException {
    mergeIndexesCommands.mark();
    int rc;
    log.info("start " + cmd);
    List<DirectoryReader> readers = cmd.readers;
    if (readers != null && readers.size() > 0) {
        List<CodecReader> mergeReaders = new ArrayList<>();
        for (DirectoryReader reader : readers) {
            for (LeafReaderContext leaf : reader.leaves()) {
                mergeReaders.add(SlowCodecReaderWrapper.wrap(leaf.reader()));
            }
        }
        RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
        try {
            iw.get().addIndexes(mergeReaders.toArray(new CodecReader[mergeReaders.size()]));
        } finally {
            iw.decref();
        }
        rc = 1;
    } else {
        rc = 0;
    }
    log.info("end_mergeIndexes");
    // TODO: consider soft commit issues
    if (rc == 1 && commitTracker.getTimeUpperBound() > 0) {
        commitTracker.scheduleCommitWithin(commitTracker.getTimeUpperBound());
    } else if (rc == 1 && softCommitTracker.getTimeUpperBound() > 0) {
        softCommitTracker.scheduleCommitWithin(softCommitTracker.getTimeUpperBound());
    }
    return rc;
}
Also used: CodecReader (org.apache.lucene.index.CodecReader), DirectoryReader (org.apache.lucene.index.DirectoryReader), IndexWriter (org.apache.lucene.index.IndexWriter), ArrayList (java.util.ArrayList), LeafReaderContext (org.apache.lucene.index.LeafReaderContext)
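
A minimal sketch, with hypothetical index paths, of the leaf-by-leaf merge pattern above reduced to plain Lucene: each segment of a source index is wrapped as a CodecReader via SlowCodecReaderWrapper and appended to a target IndexWriter.

import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.CodecReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SlowCodecReaderWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MergeIndexesSketch {

    public static void main(String[] args) throws Exception {
        try (Directory source = FSDirectory.open(Paths.get("/tmp/source-index"));   // hypothetical path
             Directory target = FSDirectory.open(Paths.get("/tmp/target-index"));   // hypothetical path
             DirectoryReader reader = DirectoryReader.open(source);
             IndexWriter writer = new IndexWriter(target, new IndexWriterConfig(new StandardAnalyzer()))) {
            // Wrap every segment of the source index as a CodecReader.
            List<CodecReader> mergeReaders = new ArrayList<>();
            for (LeafReaderContext leaf : reader.leaves()) {
                mergeReaders.add(SlowCodecReaderWrapper.wrap(leaf.reader()));
            }
            writer.addIndexes(mergeReaders.toArray(new CodecReader[0]));
            writer.commit();
        }
    }
}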

Example 83 with LeafReaderContext

Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.

From the class SolrIndexSplitter, the method split:

public void split() throws IOException {
    List<LeafReaderContext> leaves = searcher.getRawReader().leaves();
    List<FixedBitSet[]> segmentDocSets = new ArrayList<>(leaves.size());
    log.info("SolrIndexSplitter: partitions=" + numPieces + " segments=" + leaves.size());
    for (LeafReaderContext readerContext : leaves) {
        // make sure we're going in order
        assert readerContext.ordInParent == segmentDocSets.size();
        FixedBitSet[] docSets = split(readerContext);
        segmentDocSets.add(docSets);
    }
    for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
        log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : ""));
        boolean success = false;
        RefCounted<IndexWriter> iwRef = null;
        IndexWriter iw = null;
        if (cores != null) {
            SolrCore subCore = cores.get(partitionNumber);
            iwRef = subCore.getUpdateHandler().getSolrCoreState().getIndexWriter(subCore);
            iw = iwRef.get();
        } else {
            SolrCore core = searcher.getCore();
            String path = paths.get(partitionNumber);
            iw = SolrIndexWriter.create(core, "SplittingIndexWriter" + partitionNumber + (ranges != null ? " " + ranges.get(partitionNumber) : ""), path, core.getDirectoryFactory(), true, core.getLatestSchema(), core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
        }
        try {
            // This removes deletions but optimize might still be needed because sub-shards will have the same number of segments as the parent shard.
            for (int segmentNumber = 0; segmentNumber < leaves.size(); segmentNumber++) {
                log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : "") + " segment #" + segmentNumber + " segmentCount=" + leaves.size());
                CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
                iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(segmentNumber)[partitionNumber]));
            }
            // we commit explicitly instead of sending a CommitUpdateCommand through the processor chain
            // because the sub-shard cores will just ignore such a commit because the update log is not
            // in active state at this time.
            //TODO no commitUpdateCommand
            SolrIndexWriter.setCommitData(iw, -1);
            iw.commit();
            success = true;
        } finally {
            if (iwRef != null) {
                iwRef.decref();
            } else {
                if (success) {
                    iw.close();
                } else {
                    IOUtils.closeWhileHandlingException(iw);
                }
            }
        }
    }
}
Also used: FilterCodecReader (org.apache.lucene.index.FilterCodecReader), CodecReader (org.apache.lucene.index.CodecReader), SolrCore (org.apache.solr.core.SolrCore), ArrayList (java.util.ArrayList), IndexWriter (org.apache.lucene.index.IndexWriter), FixedBitSet (org.apache.lucene.util.FixedBitSet), LeafReaderContext (org.apache.lucene.index.LeafReaderContext)
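
A minimal sketch of the per-segment split step that the loop above depends on, with hypothetical docID-based routing: for each leaf, one FixedBitSet per partition records which live documents belong to that partition. The real splitter routes by hashing each document's unique key field; parity on the docID is used here only to show the shape of the result.

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

public class SegmentSplitSketch {

    static FixedBitSet[] split(LeafReaderContext leaf, int numPieces) {
        int maxDoc = leaf.reader().maxDoc();
        FixedBitSet[] docSets = new FixedBitSet[numPieces];
        for (int i = 0; i < numPieces; i++) {
            docSets[i] = new FixedBitSet(maxDoc);
        }
        Bits liveDocs = leaf.reader().getLiveDocs(); // null means this segment has no deletions
        for (int doc = 0; doc < maxDoc; doc++) {
            if (liveDocs != null && !liveDocs.get(doc)) {
                continue; // skip deleted documents
            }
            // Hypothetical routing by docID parity; a real splitter hashes the unique key field.
            docSets[doc % numPieces].set(doc);
        }
        return docSets;
    }
}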

Example 84 with LeafReaderContext

Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.

From the class TestDrillSideways, the method testRandom:

public void testRandom() throws Exception {
    while (aChance == 0.0) {
        aChance = random().nextDouble();
    }
    while (bChance == 0.0) {
        bChance = random().nextDouble();
    }
    while (cChance == 0.0) {
        cChance = random().nextDouble();
    }
    //aChance = .01;
    //bChance = 0.5;
    //cChance = 1.0;
    double sum = aChance + bChance + cChance;
    aChance /= sum;
    bChance /= sum;
    cChance /= sum;
    int numDims = TestUtil.nextInt(random(), 2, 5);
    //int numDims = 3;
    int numDocs = atLeast(3000);
    //int numDocs = 20;
    if (VERBOSE) {
        System.out.println("numDims=" + numDims + " numDocs=" + numDocs + " aChance=" + aChance + " bChance=" + bChance + " cChance=" + cChance);
    }
    String[][] dimValues = new String[numDims][];
    int valueCount = 2;
    for (int dim = 0; dim < numDims; dim++) {
        Set<String> values = new HashSet<>();
        while (values.size() < valueCount) {
            String s = TestUtil.randomRealisticUnicodeString(random());
            //String s = _TestUtil.randomString(random());
            if (s.length() > 0) {
                values.add(s);
            }
        }
        dimValues[dim] = values.toArray(new String[values.size()]);
        valueCount *= 2;
    }
    List<Doc> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Doc doc = new Doc();
        doc.id = "" + i;
        doc.contentToken = randomContentToken(false);
        doc.dims = new int[numDims];
        doc.dims2 = new int[numDims];
        for (int dim = 0; dim < numDims; dim++) {
            if (random().nextInt(5) == 3) {
                // This doc is missing this dim:
                doc.dims[dim] = -1;
            } else if (dimValues[dim].length <= 4) {
                int dimUpto = 0;
                doc.dims[dim] = dimValues[dim].length - 1;
                while (dimUpto < dimValues[dim].length) {
                    if (random().nextBoolean()) {
                        doc.dims[dim] = dimUpto;
                        break;
                    }
                    dimUpto++;
                }
            } else {
                doc.dims[dim] = random().nextInt(dimValues[dim].length);
            }
            if (random().nextInt(5) == 3) {
                // 2nd value:
                doc.dims2[dim] = random().nextInt(dimValues[dim].length);
            } else {
                doc.dims2[dim] = -1;
            }
        }
        docs.add(doc);
    }
    Directory d = newDirectory();
    Directory td = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    iwc.setInfoStream(InfoStream.NO_OUTPUT);
    RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
    DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(td, IndexWriterConfig.OpenMode.CREATE);
    FacetsConfig config = new FacetsConfig();
    for (int i = 0; i < numDims; i++) {
        config.setMultiValued("dim" + i, true);
    }
    boolean doUseDV = random().nextBoolean();
    for (Doc rawDoc : docs) {
        Document doc = new Document();
        doc.add(newStringField("id", rawDoc.id, Field.Store.YES));
        doc.add(new SortedDocValuesField("id", new BytesRef(rawDoc.id)));
        doc.add(newStringField("content", rawDoc.contentToken, Field.Store.NO));
        if (VERBOSE) {
            System.out.println("  doc id=" + rawDoc.id + " token=" + rawDoc.contentToken);
        }
        for (int dim = 0; dim < numDims; dim++) {
            int dimValue = rawDoc.dims[dim];
            if (dimValue != -1) {
                if (doUseDV) {
                    doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue]));
                } else {
                    doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue]));
                }
                doc.add(new StringField("dim" + dim, dimValues[dim][dimValue], Field.Store.YES));
                if (VERBOSE) {
                    System.out.println("    dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue]));
                }
            }
            int dimValue2 = rawDoc.dims2[dim];
            if (dimValue2 != -1) {
                if (doUseDV) {
                    doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue2]));
                } else {
                    doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue2]));
                }
                doc.add(new StringField("dim" + dim, dimValues[dim][dimValue2], Field.Store.YES));
                if (VERBOSE) {
                    System.out.println("      dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue2]));
                }
            }
        }
        w.addDocument(config.build(tw, doc));
    }
    if (random().nextBoolean()) {
        // Randomly delete a few docs:
        int numDel = TestUtil.nextInt(random(), 1, (int) (numDocs * 0.05));
        if (VERBOSE) {
            System.out.println("delete " + numDel);
        }
        int delCount = 0;
        while (delCount < numDel) {
            Doc doc = docs.get(random().nextInt(docs.size()));
            if (!doc.deleted) {
                if (VERBOSE) {
                    System.out.println("  delete id=" + doc.id);
                }
                doc.deleted = true;
                w.deleteDocuments(new Term("id", doc.id));
                delCount++;
            }
        }
    }
    if (random().nextBoolean()) {
        if (VERBOSE) {
            System.out.println("TEST: forceMerge(1)...");
        }
        w.forceMerge(1);
    }
    IndexReader r = w.getReader();
    final SortedSetDocValuesReaderState sortedSetDVState;
    IndexSearcher s = newSearcher(r);
    if (doUseDV) {
        sortedSetDVState = new DefaultSortedSetDocValuesReaderState(s.getIndexReader());
    } else {
        sortedSetDVState = null;
    }
    if (VERBOSE) {
        System.out.println("r.numDocs() = " + r.numDocs());
    }
    // NRT open
    TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
    int numIters = atLeast(10);
    for (int iter = 0; iter < numIters; iter++) {
        String contentToken = random().nextInt(30) == 17 ? null : randomContentToken(true);
        int numDrillDown = TestUtil.nextInt(random(), 1, Math.min(4, numDims));
        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " baseQuery=" + contentToken + " numDrillDown=" + numDrillDown + " useSortedSetDV=" + doUseDV);
        }
        String[][] drillDowns = new String[numDims][];
        int count = 0;
        boolean anyMultiValuedDrillDowns = false;
        while (count < numDrillDown) {
            int dim = random().nextInt(numDims);
            if (drillDowns[dim] == null) {
                if (random().nextBoolean()) {
                    // Drill down on one value:
                    drillDowns[dim] = new String[] { dimValues[dim][random().nextInt(dimValues[dim].length)] };
                } else {
                    int orCount = TestUtil.nextInt(random(), 1, Math.min(5, dimValues[dim].length));
                    drillDowns[dim] = new String[orCount];
                    anyMultiValuedDrillDowns |= orCount > 1;
                    for (int i = 0; i < orCount; i++) {
                        while (true) {
                            String value = dimValues[dim][random().nextInt(dimValues[dim].length)];
                            for (int j = 0; j < i; j++) {
                                if (value.equals(drillDowns[dim][j])) {
                                    value = null;
                                    break;
                                }
                            }
                            if (value != null) {
                                drillDowns[dim][i] = value;
                                break;
                            }
                        }
                    }
                }
                if (VERBOSE) {
                    BytesRef[] values = new BytesRef[drillDowns[dim].length];
                    for (int i = 0; i < values.length; i++) {
                        values[i] = new BytesRef(drillDowns[dim][i]);
                    }
                    System.out.println("  dim" + dim + "=" + Arrays.toString(values));
                }
                count++;
            }
        }
        Query baseQuery;
        if (contentToken == null) {
            baseQuery = new MatchAllDocsQuery();
        } else {
            baseQuery = new TermQuery(new Term("content", contentToken));
        }
        DrillDownQuery ddq = new DrillDownQuery(config, baseQuery);
        for (int dim = 0; dim < numDims; dim++) {
            if (drillDowns[dim] != null) {
                for (String value : drillDowns[dim]) {
                    ddq.add("dim" + dim, value);
                }
            }
        }
        Query filter;
        if (random().nextInt(7) == 6) {
            if (VERBOSE) {
                System.out.println("  only-even filter");
            }
            filter = new Query() {

                @Override
                public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
                    return new ConstantScoreWeight(this, boost) {

                        @Override
                        public Scorer scorer(LeafReaderContext context) throws IOException {
                            DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
                            return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(approximation) {

                                @Override
                                public boolean matches() throws IOException {
                                    int docID = approximation.docID();
                                    return (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0;
                                }

                                @Override
                                public float matchCost() {
                                    return 1000f;
                                }
                            });
                        }
                    };
                }

                @Override
                public String toString(String field) {
                    return "drillSidewaysTestFilter";
                }

                @Override
                public boolean equals(Object o) {
                    return o == this;
                }

                @Override
                public int hashCode() {
                    return System.identityHashCode(this);
                }
            };
        } else {
            filter = null;
        }
        // Verify docs are always collected in order.  If we
        // had an AssertingScorer it could catch it when
        // Weight.scoresDocsOutOfOrder lies!:
        getNewDrillSideways(s, config, tr).search(ddq, new SimpleCollector() {

            int lastDocID;

            @Override
            public void collect(int doc) {
                assert doc > lastDocID;
                lastDocID = doc;
            }

            @Override
            protected void doSetNextReader(LeafReaderContext context) throws IOException {
                lastDocID = -1;
            }

            @Override
            public boolean needsScores() {
                return false;
            }
        });
        // subScorers are on the same docID:
        if (!anyMultiValuedDrillDowns) {
            // Can only do this test when there are no OR'd
            // drill-down values, because in that case it's
            // easily possible for one of the DD terms to be on
            // a future docID:
            getNewDrillSidewaysScoreSubdocsAtOnce(s, config, tr).search(ddq, new AssertingSubDocsAtOnceCollector());
        }
        TestFacetResult expected = slowDrillSidewaysSearch(s, docs, contentToken, drillDowns, dimValues, filter);
        Sort sort = new Sort(new SortField("id", SortField.Type.STRING));
        DrillSideways ds;
        if (doUseDV) {
            ds = getNewDrillSideways(s, config, sortedSetDVState);
        } else {
            ds = getNewDrillSidewaysBuildFacetsResult(s, config, tr);
        }
        // Retrieve all facets:
        DrillSidewaysResult actual = ds.search(ddq, filter, null, numDocs, sort, true, true);
        TopDocs hits = s.search(baseQuery, numDocs);
        Map<String, Float> scores = new HashMap<>();
        for (ScoreDoc sd : hits.scoreDocs) {
            scores.put(s.doc(sd.doc).get("id"), sd.score);
        }
        if (VERBOSE) {
            System.out.println("  verify all facets");
        }
        verifyEquals(dimValues, s, expected, actual, scores, doUseDV);
        // Make sure drill down doesn't change score:
        Query q = ddq;
        if (filter != null) {
            q = new BooleanQuery.Builder().add(q, Occur.MUST).add(filter, Occur.FILTER).build();
        }
        TopDocs ddqHits = s.search(q, numDocs);
        assertEquals(expected.hits.size(), ddqHits.totalHits);
        for (int i = 0; i < expected.hits.size(); i++) {
            // Score should be IDENTICAL:
            assertEquals(scores.get(expected.hits.get(i).id), ddqHits.scoreDocs[i].score, 0.0f);
        }
    }
    w.close();
    IOUtils.close(r, tr, tw, d, td);
}
Also used: Query (org.apache.lucene.search.Query), MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery), TermQuery (org.apache.lucene.search.TermQuery), BooleanQuery (org.apache.lucene.search.BooleanQuery), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ConstantScoreScorer (org.apache.lucene.search.ConstantScoreScorer), Scorer (org.apache.lucene.search.Scorer), SortField (org.apache.lucene.search.SortField), Document (org.apache.lucene.document.Document), ScoreDoc (org.apache.lucene.search.ScoreDoc), SimpleCollector (org.apache.lucene.search.SimpleCollector), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), Sort (org.apache.lucene.search.Sort), HashSet (java.util.HashSet), DirectoryTaxonomyReader (org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader), SortedSetDocValuesReaderState (org.apache.lucene.facet.sortedset.SortedSetDocValuesReaderState), DefaultSortedSetDocValuesReaderState (org.apache.lucene.facet.sortedset.DefaultSortedSetDocValuesReaderState), TwoPhaseIterator (org.apache.lucene.search.TwoPhaseIterator), Term (org.apache.lucene.index.Term), Weight (org.apache.lucene.search.Weight), ConstantScoreWeight (org.apache.lucene.search.ConstantScoreWeight), DrillSidewaysResult (org.apache.lucene.facet.DrillSideways.DrillSidewaysResult), StringField (org.apache.lucene.document.StringField), SortedSetDocValuesFacetField (org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), IndexSearcher (org.apache.lucene.search.IndexSearcher), DirectoryTaxonomyWriter (org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter), TopDocs (org.apache.lucene.search.TopDocs), SortedDocValuesField (org.apache.lucene.document.SortedDocValuesField), BytesRef (org.apache.lucene.util.BytesRef), Directory (org.apache.lucene.store.Directory), TaxonomyReader (org.apache.lucene.facet.taxonomy.TaxonomyReader), IOException (java.io.IOException), IndexReader (org.apache.lucene.index.IndexReader), DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)
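
A minimal sketch, with a hypothetical dimension name and an already-built index and taxonomy, of the DrillSideways API that the test exercises at random, reduced to a single drill-down.

import java.io.IOException;

import org.apache.lucene.facet.DrillDownQuery;
import org.apache.lucene.facet.DrillSideways;
import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
import org.apache.lucene.facet.FacetResult;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;

public class DrillSidewaysSketch {

    static void facetWithDrillDown(IndexSearcher searcher, TaxonomyReader taxoReader,
                                   FacetsConfig config) throws IOException {
        // Drill down on dim0=someValue; DrillSideways still counts the other values of dim0.
        DrillDownQuery ddq = new DrillDownQuery(config, new MatchAllDocsQuery());
        ddq.add("dim0", "someValue"); // hypothetical dimension and value
        DrillSideways ds = new DrillSideways(searcher, config, taxoReader);
        DrillSidewaysResult result = ds.search(ddq, 10);
        FacetResult dim0Counts = result.facets.getTopChildren(10, "dim0");
        System.out.println("hits=" + result.hits.totalHits + " facets=" + dim0Counts);
    }
}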

Example 85 with LeafReaderContext

Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.

From the class TestCachedOrdinalsReader, the method testWithThreads:

@Test
public void testWithThreads() throws Exception {
    // LUCENE-5303: OrdinalsCache used the ThreadLocal BinaryDV instead of reader.getCoreCacheKey().
    Directory indexDir = newDirectory();
    Directory taxoDir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    IndexWriter writer = new IndexWriter(indexDir, conf);
    DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
    FacetsConfig config = new FacetsConfig();
    Document doc = new Document();
    doc.add(new FacetField("A", "1"));
    writer.addDocument(config.build(taxoWriter, doc));
    doc = new Document();
    doc.add(new FacetField("A", "2"));
    writer.addDocument(config.build(taxoWriter, doc));
    final DirectoryReader reader = DirectoryReader.open(writer);
    final CachedOrdinalsReader ordsReader = new CachedOrdinalsReader(new DocValuesOrdinalsReader(FacetsConfig.DEFAULT_INDEX_FIELD_NAME));
    Thread[] threads = new Thread[3];
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread("CachedOrdsThread-" + i) {

            @Override
            public void run() {
                for (LeafReaderContext context : reader.leaves()) {
                    try {
                        ordsReader.getReader(context);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        };
    }
    long ramBytesUsed = 0;
    for (Thread t : threads) {
        t.start();
        t.join();
        if (ramBytesUsed == 0) {
            ramBytesUsed = ordsReader.ramBytesUsed();
        } else {
            assertEquals(ramBytesUsed, ordsReader.ramBytesUsed());
        }
    }
    writer.close();
    IOUtils.close(taxoWriter, reader, indexDir, taxoDir);
}
Also used: FacetsConfig (org.apache.lucene.facet.FacetsConfig), DirectoryReader (org.apache.lucene.index.DirectoryReader), FacetField (org.apache.lucene.facet.FacetField), IOException (java.io.IOException), Document (org.apache.lucene.document.Document), DirectoryTaxonomyWriter (org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), IndexWriter (org.apache.lucene.index.IndexWriter), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), Directory (org.apache.lucene.store.Directory), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig), Test (org.junit.Test)
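
A minimal sketch of reading taxonomy ordinals segment by segment through the same CachedOrdinalsReader setup as the test above; it assumes documents were indexed with FacetsConfig as shown. The cache entry is keyed per leaf, which is what the threaded assertions verify.

import java.io.IOException;

import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.CachedOrdinalsReader;
import org.apache.lucene.facet.taxonomy.DocValuesOrdinalsReader;
import org.apache.lucene.facet.taxonomy.OrdinalsReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.IntsRef;

public class OrdinalsPerLeafSketch {

    static void dumpOrdinals(DirectoryReader reader) throws IOException {
        OrdinalsReader ordsReader =
            new CachedOrdinalsReader(new DocValuesOrdinalsReader(FacetsConfig.DEFAULT_INDEX_FIELD_NAME));
        for (LeafReaderContext context : reader.leaves()) {
            // One cached segment reader per leaf; docIDs passed to get() are leaf-relative.
            OrdinalsReader.OrdinalsSegmentReader segment = ordsReader.getReader(context);
            IntsRef ordinals = new IntsRef();
            for (int doc = 0; doc < context.reader().maxDoc(); doc++) {
                segment.get(doc, ordinals); // fills ordinals.ints[ordinals.offset .. offset + length)
            }
        }
    }
}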

Aggregations

LeafReaderContext (org.apache.lucene.index.LeafReaderContext): 326 usages
LeafReader (org.apache.lucene.index.LeafReader): 70 usages
Document (org.apache.lucene.document.Document): 68 usages
BytesRef (org.apache.lucene.util.BytesRef): 66 usages
IOException (java.io.IOException): 65 usages
Directory (org.apache.lucene.store.Directory): 57 usages
Term (org.apache.lucene.index.Term): 50 usages
IndexSearcher (org.apache.lucene.search.IndexSearcher): 47 usages
RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter): 45 usages
DirectoryReader (org.apache.lucene.index.DirectoryReader): 44 usages
Bits (org.apache.lucene.util.Bits): 44 usages
IndexReader (org.apache.lucene.index.IndexReader): 43 usages
NumericDocValues (org.apache.lucene.index.NumericDocValues): 40 usages
ArrayList (java.util.ArrayList): 39 usages
Terms (org.apache.lucene.index.Terms): 35 usages
Weight (org.apache.lucene.search.Weight): 35 usages
DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator): 34 usages
Scorer (org.apache.lucene.search.Scorer): 34 usages
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 33 usages
TermsEnum (org.apache.lucene.index.TermsEnum): 31 usages