Use of org.apache.lucene.index.MultiReader in project lucene-solr by apache.
Class TestBooleanRewrites, method testDeduplicateMustAndFilter:
public void testDeduplicateMustAndFilter() throws IOException {
  IndexSearcher searcher = newSearcher(new MultiReader());
  BooleanQuery bq = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
      .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
      .build();
  assertEquals(new TermQuery(new Term("foo", "bar")), searcher.rewrite(bq));
  bq = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
      .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
      .add(new TermQuery(new Term("foo", "baz")), Occur.FILTER)
      .build();
  BooleanQuery expected = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
      .add(new TermQuery(new Term("foo", "baz")), Occur.FILTER)
      .build();
  assertEquals(expected, searcher.rewrite(bq));
}
Use of org.apache.lucene.index.MultiReader in project lucene-solr by apache.
Class TestBooleanRewrites, method testSingleMustMatchAllWithShouldClauses:
public void testSingleMustMatchAllWithShouldClauses() throws IOException {
  IndexSearcher searcher = newSearcher(new MultiReader());
  BooleanQuery bq = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), Occur.MUST)
      .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
      .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
      .add(new TermQuery(new Term("foo", "quux")), Occur.SHOULD)
      .build();
  BooleanQuery expected = new BooleanQuery.Builder()
      .add(new ConstantScoreQuery(new TermQuery(new Term("foo", "bar"))), Occur.MUST)
      .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
      .add(new TermQuery(new Term("foo", "quux")), Occur.SHOULD)
      .build();
  assertEquals(expected, searcher.rewrite(bq));
}
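Both TestBooleanRewrites methods above rely on the same trick: a no-argument MultiReader is an empty composite reader, which gives IndexSearcher.rewrite() something valid to run against without building an index. Below is a minimal standalone sketch of that pattern, assuming only stock Lucene classes; the class name and printed output are illustrative, not taken from the tests.

import java.io.IOException;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class RewriteOnEmptyReader {
  public static void main(String[] args) throws IOException {
    // An empty MultiReader: no sub-readers, no documents, but a valid IndexReader.
    IndexSearcher searcher = new IndexSearcher(new MultiReader());
    // MUST and FILTER on the identical term should collapse during rewrite,
    // mirroring testDeduplicateMustAndFilter above.
    BooleanQuery bq = new BooleanQuery.Builder()
        .add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
        .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
        .build();
    Query rewritten = searcher.rewrite(bq);
    System.out.println(rewritten); // expected: the single term query foo:bar
  }
}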
Use of org.apache.lucene.index.MultiReader in project jackrabbit-oak by apache.
Class IndexNode, method createReader:
private IndexReader createReader(List<LuceneIndexReader> nrtReaders) {
  if (readers.size() == 1 && nrtReaders.isEmpty()) {
    return readers.get(0).getReader();
  }
  if (nrtReaders.size() == 1 && readers.isEmpty()) {
    return nrtReaders.get(0).getReader();
  }
  IndexReader[] readerArr = new IndexReader[readers.size() + nrtReaders.size()];
  int i = 0;
  for (LuceneIndexReader r : Iterables.concat(readers, nrtReaders)) {
    readerArr[i++] = r.getReader();
  }
  return new MultiReader(readerArr, true);
}
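The second constructor argument in the return statement controls ownership: with closeSubReaders set to true, closing the MultiReader also releases the wrapped readers, so callers of createReader only have to manage a single handle. Here is a minimal sketch of the same composition over two plain DirectoryReaders, assuming nothing from jackrabbit-oak; the index paths are placeholders.

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CombinedReaderExample {
  public static void main(String[] args) throws IOException {
    // Placeholder index locations; substitute real index directories.
    Directory dir1 = FSDirectory.open(Paths.get("/path/to/index1"));
    Directory dir2 = FSDirectory.open(Paths.get("/path/to/index2"));
    IndexReader r1 = DirectoryReader.open(dir1);
    IndexReader r2 = DirectoryReader.open(dir2);
    // closeSubReaders = true: closing the MultiReader closes r1 and r2 as well.
    MultiReader combined = new MultiReader(new IndexReader[] { r1, r2 }, true);
    try {
      IndexSearcher searcher = new IndexSearcher(combined);
      System.out.println("combined maxDoc=" + searcher.getIndexReader().maxDoc());
    } finally {
      combined.close();
      dir1.close();
      dir2.close();
    }
  }
}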
Use of org.apache.lucene.index.MultiReader in project lucene-solr by apache.
Class TestMultiTermQueryRewrites, method beforeClass:
@BeforeClass
public static void beforeClass() throws Exception {
  dir = newDirectory();
  sdir1 = newDirectory();
  sdir2 = newDirectory();
  final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random()));
  final RandomIndexWriter swriter1 = new RandomIndexWriter(random(), sdir1, new MockAnalyzer(random()));
  final RandomIndexWriter swriter2 = new RandomIndexWriter(random(), sdir2, new MockAnalyzer(random()));
  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    doc.add(newStringField("data", Integer.toString(i), Field.Store.NO));
    writer.addDocument(doc);
    ((i % 2 == 0) ? swriter1 : swriter2).addDocument(doc);
  }
  writer.forceMerge(1);
  swriter1.forceMerge(1);
  swriter2.forceMerge(1);
  writer.close();
  swriter1.close();
  swriter2.close();
  reader = DirectoryReader.open(dir);
  searcher = newSearcher(reader);
  multiReader = new MultiReader(new IndexReader[] {
      DirectoryReader.open(sdir1), DirectoryReader.open(sdir2) }, true);
  multiSearcher = newSearcher(multiReader);
  multiReaderDupls = new MultiReader(new IndexReader[] {
      DirectoryReader.open(sdir1), DirectoryReader.open(dir) }, true);
  multiSearcherDupls = newSearcher(multiReaderDupls);
}
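Here dir holds all ten documents while sdir1 and sdir2 split them between two shard indexes, so queries against searcher and against multiSearcher (a MultiReader over the two shards) should return the same totals; multiReaderDupls deliberately combines sdir1 with the full dir to exercise duplicate handling. A hedged sketch of the kind of cross-check this setup enables follows; the helper name is illustrative, and IndexSearcher.count is assumed to be available (it is in Lucene 5.1 and later).

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

public final class ShardConsistencyCheck {

  private ShardConsistencyCheck() {}

  /**
   * Illustrative helper: run the same query against a searcher over the full
   * index and a searcher over a MultiReader of the shards, and verify that the
   * total hit counts match.
   */
  static void assertSameHitCount(IndexSearcher full, IndexSearcher sharded, Query query) throws IOException {
    int fullCount = full.count(query);
    int shardedCount = sharded.count(query);
    if (fullCount != shardedCount) {
      throw new AssertionError("hit count mismatch: full=" + fullCount + " sharded=" + shardedCount);
    }
  }
}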
Use of org.apache.lucene.index.MultiReader in project lucene-solr by apache.
Class TestShardSearching, method testSimple:
public void testSimple() throws Exception {
  final int numNodes = TestUtil.nextInt(random(), 1, 10);
  final double runTimeSec = atLeast(3);
  final int minDocsToMakeTerms = TestUtil.nextInt(random(), 5, 20);
  final int maxSearcherAgeSeconds = TestUtil.nextInt(random(), 1, 3);
  if (VERBOSE) {
    System.out.println("TEST: numNodes=" + numNodes + " runTimeSec=" + runTimeSec + " maxSearcherAgeSeconds=" + maxSearcherAgeSeconds);
  }
  start(numNodes, runTimeSec, maxSearcherAgeSeconds);
  final List<PreviousSearchState> priorSearches = new ArrayList<>();
  List<BytesRef> terms = null;
  while (System.nanoTime() < endTimeNanos) {
    final boolean doFollowon = priorSearches.size() > 0 && random().nextInt(7) == 1;
    // Pick a random node; we will run the query on this node:
    final int myNodeID = random().nextInt(numNodes);
    final NodeState.ShardIndexSearcher localShardSearcher;
    final PreviousSearchState prevSearchState;
    if (doFollowon) {
      // Pretend user issued a followon query:
      prevSearchState = priorSearches.get(random().nextInt(priorSearches.size()));
      if (VERBOSE) {
        System.out.println("\nTEST: follow-on query age=" + ((System.nanoTime() - prevSearchState.searchTimeNanos) / 1000000000.0));
      }
      try {
        localShardSearcher = nodes[myNodeID].acquire(prevSearchState.versions);
      } catch (SearcherExpiredException see) {
        // Expected, sometimes: the node may have invalidated or closed the
        // searcher w/o telling them...
        if (VERBOSE) {
          System.out.println(" searcher expired during local shard searcher init: " + see);
        }
        priorSearches.remove(prevSearchState);
        continue;
      }
    } else {
      if (VERBOSE) {
        System.out.println("\nTEST: fresh query");
      }
      // Do fresh query:
      localShardSearcher = nodes[myNodeID].acquire();
      prevSearchState = null;
    }
    final IndexReader[] subs = new IndexReader[numNodes];
    PreviousSearchState searchState = null;
    try {
      // Mock: now make a single reader (MultiReader) from all node
      // searchers. In a real shard env you can't do this... we
      // do it to confirm results from the shard searcher
      // are correct:
      int docCount = 0;
      try {
        for (int nodeID = 0; nodeID < numNodes; nodeID++) {
          final long subVersion = localShardSearcher.nodeVersions[nodeID];
          final IndexSearcher sub = nodes[nodeID].searchers.acquire(subVersion);
          if (sub == null) {
            nodeID--;
            while (nodeID >= 0) {
              subs[nodeID].decRef();
              subs[nodeID] = null;
              nodeID--;
            }
            throw new SearcherExpiredException("nodeID=" + nodeID + " version=" + subVersion);
          }
          subs[nodeID] = sub.getIndexReader();
          docCount += subs[nodeID].maxDoc();
        }
      } catch (SearcherExpiredException see) {
        // Expected
        if (VERBOSE) {
          System.out.println(" searcher expired during mock reader init: " + see);
        }
        continue;
      }
      final IndexReader mockReader = new MultiReader(subs);
      final IndexSearcher mockSearcher = new IndexSearcher(mockReader);
      Query query;
      Sort sort;
      if (prevSearchState != null) {
        query = prevSearchState.query;
        sort = prevSearchState.sort;
      } else {
        if (terms == null && docCount > minDocsToMakeTerms) {
          // TODO: try to "focus" on high freq terms sometimes too
          // TODO: maybe also periodically reset the terms...?
          final TermsEnum termsEnum = MultiFields.getTerms(mockReader, "body").iterator();
          terms = new ArrayList<>();
          while (termsEnum.next() != null) {
            terms.add(BytesRef.deepCopyOf(termsEnum.term()));
          }
          if (VERBOSE) {
            System.out.println("TEST: init terms: " + terms.size() + " terms");
          }
          if (terms.size() == 0) {
            terms = null;
          }
        }
        if (VERBOSE) {
          System.out.println(" maxDoc=" + mockReader.maxDoc());
        }
        if (terms != null) {
          if (random().nextBoolean()) {
            query = new TermQuery(new Term("body", terms.get(random().nextInt(terms.size()))));
          } else {
            final String t = terms.get(random().nextInt(terms.size())).utf8ToString();
            final String prefix;
            if (t.length() <= 1) {
              prefix = t;
            } else {
              prefix = t.substring(0, TestUtil.nextInt(random(), 1, 2));
            }
            query = new PrefixQuery(new Term("body", prefix));
          }
          if (random().nextBoolean()) {
            sort = null;
          } else {
            // TODO: sort by more than 1 field
            final int what = random().nextInt(3);
            if (what == 0) {
              sort = new Sort(SortField.FIELD_SCORE);
            } else if (what == 1) {
              // TODO: this sort doesn't merge
              // correctly... it's tricky because you
              // could have > 2.1B docs across all shards:
              //sort = new Sort(SortField.FIELD_DOC);
              sort = null;
            } else if (what == 2) {
              sort = new Sort(new SortField[] { new SortField("docid_intDV", SortField.Type.INT, random().nextBoolean()) });
            } else {
              sort = new Sort(new SortField[] { new SortField("titleDV", SortField.Type.STRING, random().nextBoolean()) });
            }
          }
        } else {
          query = null;
          sort = null;
        }
      }
      if (query != null) {
        try {
          searchState = assertSame(mockSearcher, localShardSearcher, query, sort, prevSearchState);
        } catch (SearcherExpiredException see) {
          // Expected, sometimes: the node may have invalidated or closed the
          // searcher w/o telling them...
          if (VERBOSE) {
            System.out.println(" searcher expired during search: " + see);
            see.printStackTrace(System.out);
          }
          // assert prevSearchState != null;
          if (prevSearchState != null) {
            priorSearches.remove(prevSearchState);
          }
        }
      }
    } finally {
      nodes[myNodeID].release(localShardSearcher);
      for (IndexReader sub : subs) {
        if (sub != null) {
          sub.decRef();
        }
      }
    }
    if (searchState != null && searchState.searchAfterLocal != null && random().nextInt(5) == 3) {
      priorSearches.add(searchState);
      if (priorSearches.size() > 200) {
        Collections.shuffle(priorSearches, random());
        priorSearches.subList(100, priorSearches.size()).clear();
      }
    }
  }
  finish();
}
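Note the ownership convention around the mock MultiReader: the test never closes it; instead, the finally block releases the shard searcher and decRefs each sub-reader that was acquired from a node's searcher manager. Below is a compact sketch of a similar acquire/wrap/release pattern using plain SearcherManagers, with closeSubReaders set to false so the managers retain ownership of the underlying readers; the helper and its two-phase structure are illustrative, not taken from the test.

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.TopDocs;

public final class CombinedShardSearch {

  private CombinedShardSearch() {}

  /**
   * Illustrative helper: search one query across several shards whose
   * searchers are owned by SearcherManagers. The MultiReader is created with
   * closeSubReaders = false, so closing it does not close the shard readers;
   * the managers keep ownership, and the acquired searchers are released in
   * the outer finally block.
   */
  static TopDocs searchAllShards(SearcherManager[] managers, Query query, int topN) throws IOException {
    IndexSearcher[] acquired = new IndexSearcher[managers.length];
    IndexReader[] subReaders = new IndexReader[managers.length];
    try {
      for (int i = 0; i < managers.length; i++) {
        acquired[i] = managers[i].acquire();
        subReaders[i] = acquired[i].getIndexReader();
      }
      MultiReader combined = new MultiReader(subReaders, false);
      try {
        return new IndexSearcher(combined).search(query, topN);
      } finally {
        combined.close();
      }
    } finally {
      for (int i = 0; i < managers.length; i++) {
        if (acquired[i] != null) {
          managers[i].release(acquired[i]);
        }
      }
    }
  }
}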