Use of org.apache.lucene.search.MatchAllDocsQuery in project lucene-solr by apache, from the class TestDrillSideways, method testRandom:
public void testRandom() throws Exception {
  while (aChance == 0.0) {
    aChance = random().nextDouble();
  }
  while (bChance == 0.0) {
    bChance = random().nextDouble();
  }
  while (cChance == 0.0) {
    cChance = random().nextDouble();
  }
  //aChance = .01;
  //bChance = 0.5;
  //cChance = 1.0;
  double sum = aChance + bChance + cChance;
  aChance /= sum;
  bChance /= sum;
  cChance /= sum;
  int numDims = TestUtil.nextInt(random(), 2, 5);
  //int numDims = 3;
  int numDocs = atLeast(3000);
  //int numDocs = 20;
  if (VERBOSE) {
    System.out.println("numDims=" + numDims + " numDocs=" + numDocs + " aChance=" + aChance + " bChance=" + bChance + " cChance=" + cChance);
  }
  String[][] dimValues = new String[numDims][];
  int valueCount = 2;
  for (int dim = 0; dim < numDims; dim++) {
    Set<String> values = new HashSet<>();
    while (values.size() < valueCount) {
      String s = TestUtil.randomRealisticUnicodeString(random());
      //String s = _TestUtil.randomString(random());
      if (s.length() > 0) {
        values.add(s);
      }
    }
    dimValues[dim] = values.toArray(new String[values.size()]);
    valueCount *= 2;
  }
  List<Doc> docs = new ArrayList<>();
  for (int i = 0; i < numDocs; i++) {
    Doc doc = new Doc();
    doc.id = "" + i;
    doc.contentToken = randomContentToken(false);
    doc.dims = new int[numDims];
    doc.dims2 = new int[numDims];
    for (int dim = 0; dim < numDims; dim++) {
      if (random().nextInt(5) == 3) {
        // This doc is missing this dim:
        doc.dims[dim] = -1;
      } else if (dimValues[dim].length <= 4) {
        int dimUpto = 0;
        doc.dims[dim] = dimValues[dim].length - 1;
        while (dimUpto < dimValues[dim].length) {
          if (random().nextBoolean()) {
            doc.dims[dim] = dimUpto;
            break;
          }
          dimUpto++;
        }
      } else {
        doc.dims[dim] = random().nextInt(dimValues[dim].length);
      }
      if (random().nextInt(5) == 3) {
        // 2nd value:
        doc.dims2[dim] = random().nextInt(dimValues[dim].length);
      } else {
        doc.dims2[dim] = -1;
      }
    }
    docs.add(doc);
  }
  Directory d = newDirectory();
  Directory td = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setInfoStream(InfoStream.NO_OUTPUT);
  RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
  DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(td, IndexWriterConfig.OpenMode.CREATE);
  FacetsConfig config = new FacetsConfig();
  for (int i = 0; i < numDims; i++) {
    config.setMultiValued("dim" + i, true);
  }
  boolean doUseDV = random().nextBoolean();
  for (Doc rawDoc : docs) {
    Document doc = new Document();
    doc.add(newStringField("id", rawDoc.id, Field.Store.YES));
    doc.add(new SortedDocValuesField("id", new BytesRef(rawDoc.id)));
    doc.add(newStringField("content", rawDoc.contentToken, Field.Store.NO));
    if (VERBOSE) {
      System.out.println(" doc id=" + rawDoc.id + " token=" + rawDoc.contentToken);
    }
    for (int dim = 0; dim < numDims; dim++) {
      int dimValue = rawDoc.dims[dim];
      if (dimValue != -1) {
        if (doUseDV) {
          doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue]));
        } else {
          doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue]));
        }
        doc.add(new StringField("dim" + dim, dimValues[dim][dimValue], Field.Store.YES));
        if (VERBOSE) {
          System.out.println(" dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue]));
        }
      }
      int dimValue2 = rawDoc.dims2[dim];
      if (dimValue2 != -1) {
        if (doUseDV) {
          doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue2]));
        } else {
          doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue2]));
        }
        doc.add(new StringField("dim" + dim, dimValues[dim][dimValue2], Field.Store.YES));
        if (VERBOSE) {
          System.out.println(" dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue2]));
        }
      }
    }
    w.addDocument(config.build(tw, doc));
  }
  if (random().nextBoolean()) {
    // Randomly delete a few docs:
    int numDel = TestUtil.nextInt(random(), 1, (int) (numDocs * 0.05));
    if (VERBOSE) {
      System.out.println("delete " + numDel);
    }
    int delCount = 0;
    while (delCount < numDel) {
      Doc doc = docs.get(random().nextInt(docs.size()));
      if (!doc.deleted) {
        if (VERBOSE) {
          System.out.println(" delete id=" + doc.id);
        }
        doc.deleted = true;
        w.deleteDocuments(new Term("id", doc.id));
        delCount++;
      }
    }
  }
  if (random().nextBoolean()) {
    if (VERBOSE) {
      System.out.println("TEST: forceMerge(1)...");
    }
    w.forceMerge(1);
  }
  IndexReader r = w.getReader();
  final SortedSetDocValuesReaderState sortedSetDVState;
  IndexSearcher s = newSearcher(r);
  if (doUseDV) {
    sortedSetDVState = new DefaultSortedSetDocValuesReaderState(s.getIndexReader());
  } else {
    sortedSetDVState = null;
  }
  if (VERBOSE) {
    System.out.println("r.numDocs() = " + r.numDocs());
  }
  // NRT open
  TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
  int numIters = atLeast(10);
  for (int iter = 0; iter < numIters; iter++) {
    String contentToken = random().nextInt(30) == 17 ? null : randomContentToken(true);
    int numDrillDown = TestUtil.nextInt(random(), 1, Math.min(4, numDims));
    if (VERBOSE) {
      System.out.println("\nTEST: iter=" + iter + " baseQuery=" + contentToken + " numDrillDown=" + numDrillDown + " useSortedSetDV=" + doUseDV);
    }
    String[][] drillDowns = new String[numDims][];
    int count = 0;
    boolean anyMultiValuedDrillDowns = false;
    while (count < numDrillDown) {
      int dim = random().nextInt(numDims);
      if (drillDowns[dim] == null) {
        if (random().nextBoolean()) {
          // Drill down on one value:
          drillDowns[dim] = new String[] { dimValues[dim][random().nextInt(dimValues[dim].length)] };
        } else {
          int orCount = TestUtil.nextInt(random(), 1, Math.min(5, dimValues[dim].length));
          drillDowns[dim] = new String[orCount];
          anyMultiValuedDrillDowns |= orCount > 1;
          for (int i = 0; i < orCount; i++) {
            while (true) {
              String value = dimValues[dim][random().nextInt(dimValues[dim].length)];
              for (int j = 0; j < i; j++) {
                if (value.equals(drillDowns[dim][j])) {
                  value = null;
                  break;
                }
              }
              if (value != null) {
                drillDowns[dim][i] = value;
                break;
              }
            }
          }
        }
        if (VERBOSE) {
          BytesRef[] values = new BytesRef[drillDowns[dim].length];
          for (int i = 0; i < values.length; i++) {
            values[i] = new BytesRef(drillDowns[dim][i]);
          }
          System.out.println(" dim" + dim + "=" + Arrays.toString(values));
        }
        count++;
      }
    }
    Query baseQuery;
    if (contentToken == null) {
      baseQuery = new MatchAllDocsQuery();
    } else {
      baseQuery = new TermQuery(new Term("content", contentToken));
    }
    DrillDownQuery ddq = new DrillDownQuery(config, baseQuery);
    for (int dim = 0; dim < numDims; dim++) {
      if (drillDowns[dim] != null) {
        for (String value : drillDowns[dim]) {
          ddq.add("dim" + dim, value);
        }
      }
    }
    Query filter;
    if (random().nextInt(7) == 6) {
      if (VERBOSE) {
        System.out.println(" only-even filter");
      }
      filter = new Query() {
        @Override
        public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
          return new ConstantScoreWeight(this, boost) {
            @Override
            public Scorer scorer(LeafReaderContext context) throws IOException {
              DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
              return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(approximation) {
                @Override
                public boolean matches() throws IOException {
                  int docID = approximation.docID();
                  return (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0;
                }

                @Override
                public float matchCost() {
                  return 1000f;
                }
              });
            }
          };
        }

        @Override
        public String toString(String field) {
          return "drillSidewaysTestFilter";
        }

        @Override
        public boolean equals(Object o) {
          return o == this;
        }

        @Override
        public int hashCode() {
          return System.identityHashCode(this);
        }
      };
    } else {
      filter = null;
    }
    // Verify docs are always collected in order. If we
    // had an AssertingScorer it could catch it when
    // Weight.scoresDocsOutOfOrder lies!:
    getNewDrillSideways(s, config, tr).search(ddq, new SimpleCollector() {
      int lastDocID;

      @Override
      public void collect(int doc) {
        assert doc > lastDocID;
        lastDocID = doc;
      }

      @Override
      protected void doSetNextReader(LeafReaderContext context) throws IOException {
        lastDocID = -1;
      }

      @Override
      public boolean needsScores() {
        return false;
      }
    });
    // subScorers are on the same docID:
    if (!anyMultiValuedDrillDowns) {
      // Can only do this test when there are no OR'd
      // drill-down values, because in that case it's
      // easily possible for one of the DD terms to be on
      // a future docID:
      getNewDrillSidewaysScoreSubdocsAtOnce(s, config, tr).search(ddq, new AssertingSubDocsAtOnceCollector());
    }
    TestFacetResult expected = slowDrillSidewaysSearch(s, docs, contentToken, drillDowns, dimValues, filter);
    Sort sort = new Sort(new SortField("id", SortField.Type.STRING));
    DrillSideways ds;
    if (doUseDV) {
      ds = getNewDrillSideways(s, config, sortedSetDVState);
    } else {
      ds = getNewDrillSidewaysBuildFacetsResult(s, config, tr);
    }
    // Retrieve all facets:
    DrillSidewaysResult actual = ds.search(ddq, filter, null, numDocs, sort, true, true);
    TopDocs hits = s.search(baseQuery, numDocs);
    Map<String, Float> scores = new HashMap<>();
    for (ScoreDoc sd : hits.scoreDocs) {
      scores.put(s.doc(sd.doc).get("id"), sd.score);
    }
    if (VERBOSE) {
      System.out.println(" verify all facets");
    }
    verifyEquals(dimValues, s, expected, actual, scores, doUseDV);
    // Make sure drill down doesn't change score:
    Query q = ddq;
    if (filter != null) {
      q = new BooleanQuery.Builder().add(q, Occur.MUST).add(filter, Occur.FILTER).build();
    }
    TopDocs ddqHits = s.search(q, numDocs);
    assertEquals(expected.hits.size(), ddqHits.totalHits);
    for (int i = 0; i < expected.hits.size(); i++) {
      // Score should be IDENTICAL:
      assertEquals(scores.get(expected.hits.get(i).id), ddqHits.scoreDocs[i].score, 0.0f);
    }
  }
  w.close();
  IOUtils.close(r, tr, tw, d, td);
}
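The MatchAllDocsQuery idiom in this test is the base-query fallback: when no content term is randomly chosen, the drill-down query is built over a match-all base instead. A minimal sketch of that pattern, assuming the "content" field name, the "dim0" dimension, and "someValue" are illustrative placeholders rather than part of the test above:

import org.apache.lucene.facet.DrillDownQuery;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class DrillDownBaseQuerySketch {
  // A null token means "no base restriction": fall back to MatchAllDocsQuery.
  static DrillDownQuery build(FacetsConfig config, String contentToken) {
    Query baseQuery = contentToken == null
        ? new MatchAllDocsQuery()
        : new TermQuery(new Term("content", contentToken));
    DrillDownQuery ddq = new DrillDownQuery(config, baseQuery);
    // Each add() further constrains matches to docs carrying the facet value:
    ddq.add("dim0", "someValue");
    return ddq;
  }
}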
Use of org.apache.lucene.search.MatchAllDocsQuery in project lucene-solr by apache, from the class TestOrdinalMappingLeafReader, method verifyResults:
private void verifyResults(Directory indexDir, Directory taxoDir) throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  IndexSearcher searcher = newSearcher(indexReader);
  FacetsCollector collector = new FacetsCollector();
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, collector);
  // tag facets
  Facets tagFacets = new FastTaxonomyFacetCounts("$tags", taxoReader, facetConfig, collector);
  FacetResult result = tagFacets.getTopChildren(10, "tag");
  for (LabelAndValue lv : result.labelValues) {
    if (VERBOSE) {
      System.out.println(lv);
    }
    assertEquals(NUM_DOCS, lv.value.intValue());
  }
  // id facets
  Facets idFacets = new FastTaxonomyFacetCounts(taxoReader, facetConfig, collector);
  FacetResult idResult = idFacets.getTopChildren(10, "id");
  assertEquals(NUM_DOCS, idResult.childCount);
  // each "id" appears twice
  assertEquals(NUM_DOCS * 2, idResult.value);
  BinaryDocValues bdv = MultiDocValues.getBinaryValues(indexReader, "bdv");
  BinaryDocValues cbdv = MultiDocValues.getBinaryValues(indexReader, "cbdv");
  for (int i = 0; i < indexReader.maxDoc(); i++) {
    assertEquals(i, bdv.nextDoc());
    assertEquals(i, cbdv.nextDoc());
    assertEquals(Integer.parseInt(cbdv.binaryValue().utf8ToString()), Integer.parseInt(bdv.binaryValue().utf8ToString()) * 2);
  }
  IOUtils.close(indexReader, taxoReader);
}
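The reusable part of this snippet is the facets-only pass over the whole index: FacetsCollector.search with MatchAllDocsQuery visits every live document, so the counts are index-wide rather than per-query. A minimal sketch under the assumption that the searcher, taxonomy reader, and config are already open, and that the "tag" dimension is a placeholder:

import java.io.IOException;
import org.apache.lucene.facet.FacetResult;
import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.FastTaxonomyFacetCounts;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;

class WholeIndexFacetsSketch {
  static FacetResult countTags(IndexSearcher searcher, TaxonomyReader taxoReader, FacetsConfig config) throws IOException {
    FacetsCollector collector = new FacetsCollector();
    // MatchAllDocsQuery feeds every live document to the collector, so the
    // resulting counts cover the whole index rather than one query's hits:
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, collector);
    Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, collector);
    return facets.getTopChildren(10, "tag");
  }
}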
Use of org.apache.lucene.search.MatchAllDocsQuery in project lucene-solr by apache, from the class TestSearcherTaxonomyManager, method testDirectory:
public void testDirectory() throws Exception {
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  final IndexWriter w = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
  final DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir);
  // first empty commit
  w.commit();
  tw.commit();
  final SearcherTaxonomyManager mgr = new SearcherTaxonomyManager(indexDir, taxoDir, null);
  final FacetsConfig config = new FacetsConfig();
  config.setMultiValued("field", true);
  final AtomicBoolean stop = new AtomicBoolean();
  // How many unique facets to index before stopping:
  final int ordLimit = TEST_NIGHTLY ? 100000 : 6000;
  Thread indexer = new IndexerThread(w, config, tw, mgr, ordLimit, stop);
  indexer.start();
  try {
    while (!stop.get()) {
      SearcherAndTaxonomy pair = mgr.acquire();
      try {
        //System.out.println("search maxOrd=" + pair.taxonomyReader.getSize());
        FacetsCollector sfc = new FacetsCollector();
        pair.searcher.search(new MatchAllDocsQuery(), sfc);
        Facets facets = getTaxonomyFacetCounts(pair.taxonomyReader, config, sfc);
        FacetResult result = facets.getTopChildren(10, "field");
        if (pair.searcher.getIndexReader().numDocs() > 0) {
          //System.out.println(pair.taxonomyReader.getSize());
          assertTrue(result.childCount > 0);
          assertTrue(result.labelValues.length > 0);
        }
        //if (VERBOSE) {
        //System.out.println("TEST: facets=" + FacetTestUtils.toString(results.get(0)));
        //}
      } finally {
        mgr.release(pair);
      }
    }
  } finally {
    indexer.join();
  }
  if (VERBOSE) {
    System.out.println("TEST: now stop");
  }
  w.close();
  IOUtils.close(mgr, tw, taxoDir, indexDir);
}
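The acquire/try/finally/release discipline around SearcherTaxonomyManager is the core pattern here: the searcher and taxonomy reader come as one consistent snapshot, and the snapshot must be released even when the search throws. A minimal sketch of one search pass, assuming an already-open manager:

import java.io.IOException;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager;
import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager.SearcherAndTaxonomy;
import org.apache.lucene.search.MatchAllDocsQuery;

class AcquireReleaseSketch {
  static FacetsCollector collectAll(SearcherTaxonomyManager mgr) throws IOException {
    SearcherAndTaxonomy pair = mgr.acquire();
    try {
      // pair.searcher and pair.taxonomyReader form a consistent snapshot:
      FacetsCollector sfc = new FacetsCollector();
      pair.searcher.search(new MatchAllDocsQuery(), sfc);
      return sfc;
    } finally {
      // Release in finally so the snapshot's refcount drops even on failure:
      mgr.release(pair);
    }
  }
}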
Use of org.apache.lucene.search.MatchAllDocsQuery in project lucene-solr by apache, from the class TestRangeFacetCounts, method testCustomDoubleValuesSource:
public void testCustomDoubleValuesSource() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.addDocument(doc);
  // Test wants 3 docs in one segment:
  writer.forceMerge(1);
  final DoubleValuesSource vs = new DoubleValuesSource() {
    @Override
    public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
      return new DoubleValues() {
        int doc = -1;

        @Override
        public double doubleValue() throws IOException {
          return doc + 1;
        }

        @Override
        public boolean advanceExact(int doc) throws IOException {
          this.doc = doc;
          return true;
        }
      };
    }

    @Override
    public boolean needsScores() {
      return false;
    }

    @Override
    public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
      return Explanation.match(docId + 1, "");
    }
  };
  FacetsConfig config = new FacetsConfig();
  FacetsCollector fc = new FacetsCollector();
  IndexReader r = writer.getReader();
  IndexSearcher s = newSearcher(r);
  s.search(new MatchAllDocsQuery(), fc);
  final DoubleRange[] ranges = new DoubleRange[] {
    new DoubleRange("< 1", 0.0, true, 1.0, false),
    new DoubleRange("< 2", 0.0, true, 2.0, false),
    new DoubleRange("< 5", 0.0, true, 5.0, false),
    new DoubleRange("< 10", 0.0, true, 10.0, false),
    new DoubleRange("< 20", 0.0, true, 20.0, false),
    new DoubleRange("< 50", 0.0, true, 50.0, false)
  };
  final Query fastMatchFilter;
  final AtomicBoolean filterWasUsed = new AtomicBoolean();
  if (random().nextBoolean()) {
    // Sort of silly:
    final Query in = new MatchAllDocsQuery();
    fastMatchFilter = new UsedQuery(in, filterWasUsed);
  } else {
    fastMatchFilter = null;
  }
  if (VERBOSE) {
    System.out.println("TEST: fastMatchFilter=" + fastMatchFilter);
  }
  Facets facets = new DoubleRangeFacetCounts("field", vs, fc, fastMatchFilter, ranges);
  assertEquals("dim=field path=[] value=3 childCount=6\n < 1 (0)\n < 2 (1)\n < 5 (3)\n < 10 (3)\n < 20 (3)\n < 50 (3)\n", facets.getTopChildren(10, "field").toString());
  assertTrue(fastMatchFilter == null || filterWasUsed.get());
  DrillDownQuery ddq = new DrillDownQuery(config);
  ddq.add("field", ranges[1].getQuery(fastMatchFilter, vs));
  // Test simple drill-down:
  assertEquals(1, s.search(ddq, 10).totalHits);
  // Test drill-sideways after drill-down
  DrillSideways ds = new DrillSideways(s, config, (TaxonomyReader) null) {
    @Override
    protected Facets buildFacetsResult(FacetsCollector drillDowns, FacetsCollector[] drillSideways, String[] drillSidewaysDims) throws IOException {
      assert drillSideways.length == 1;
      return new DoubleRangeFacetCounts("field", vs, drillSideways[0], fastMatchFilter, ranges);
    }

    @Override
    protected boolean scoreSubDocsAtOnce() {
      return random().nextBoolean();
    }
  };
  DrillSidewaysResult dsr = ds.search(ddq, 10);
  assertEquals(1, dsr.hits.totalHits);
  assertEquals("dim=field path=[] value=3 childCount=6\n < 1 (0)\n < 2 (1)\n < 5 (3)\n < 10 (3)\n < 20 (3)\n < 50 (3)\n", dsr.facets.getTopChildren(10, "field").toString());
  writer.close();
  IOUtils.close(r, dir);
}
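The "sort of silly" branch above is still instructive: any Query can serve as the fast-match filter for range faceting, and MatchAllDocsQuery is the degenerate case that rules nothing out, useful only for exercising the filter code path. A minimal sketch, assuming a FacetsCollector and DoubleValuesSource built as in the test and a "field" name that is a placeholder:

import java.io.IOException;
import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.range.DoubleRange;
import org.apache.lucene.facet.range.DoubleRangeFacetCounts;
import org.apache.lucene.search.DoubleValuesSource;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;

class RangeFastMatchSketch {
  static Facets count(DoubleValuesSource vs, FacetsCollector fc) throws IOException {
    DoubleRange[] ranges = new DoubleRange[] {
      new DoubleRange("< 1", 0.0, true, 1.0, false),
      new DoubleRange("< 2", 0.0, true, 2.0, false)
    };
    // The fast-match query lets counting skip documents cheaply before the
    // (possibly expensive) value source is consulted; MatchAllDocsQuery
    // skips nothing, so here it only demonstrates the hook:
    Query fastMatchFilter = new MatchAllDocsQuery();
    return new DoubleRangeFacetCounts("field", vs, fc, fastMatchFilter, ranges);
  }
}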
Use of org.apache.lucene.search.MatchAllDocsQuery in project lucene-solr by apache, from the class TestTaxonomyFacetCounts, method testWrongIndexFieldName:
public void testWrongIndexFieldName() throws Exception {
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  // Writes facet ords to a separate directory from the
  // main index:
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
  FacetsConfig config = new FacetsConfig();
  config.setIndexFieldName("a", "$facets2");
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new FacetField("a", "foo1"));
  writer.addDocument(config.build(taxoWriter, doc));
  // NRT open
  IndexSearcher searcher = newSearcher(writer.getReader());
  // NRT open
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
  FacetsCollector c = new FacetsCollector();
  searcher.search(new MatchAllDocsQuery(), c);
  // Uses default $facets field:
  Facets facets;
  if (random().nextBoolean()) {
    facets = new FastTaxonomyFacetCounts(taxoReader, config, c);
  } else {
    OrdinalsReader ordsReader = new DocValuesOrdinalsReader();
    if (random().nextBoolean()) {
      ordsReader = new CachedOrdinalsReader(ordsReader);
    }
    facets = new TaxonomyFacetCounts(ordsReader, taxoReader, config, c);
  }
  // Ask for top 10 labels for any dims that have counts:
  List<FacetResult> results = facets.getAllDims(10);
  assertTrue(results.isEmpty());
  expectThrows(IllegalArgumentException.class, () -> {
    facets.getSpecificValue("a");
  });
  expectThrows(IllegalArgumentException.class, () -> {
    facets.getTopChildren(10, "a");
  });
  writer.close();
  IOUtils.close(taxoWriter, searcher.getIndexReader(), taxoReader, taxoDir, dir);
}
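The failure mode being tested reduces to a routing mismatch: FacetsConfig sends a dimension's ordinals into a custom index field, so a Facets instance reading the default "$facets" field sees nothing for it. A minimal sketch of the routing side, with the "a" dimension and "$facets2" field names taken from the test:

import org.apache.lucene.facet.FacetsConfig;

class IndexFieldNameSketch {
  static FacetsConfig configure() {
    FacetsConfig config = new FacetsConfig();
    // Ordinals for dimension "a" are written to "$facets2" instead of the
    // default "$facets"; a counts implementation built without naming
    // "$facets2" reads the default field and finds no counts for "a",
    // hence the empty getAllDims() and the IllegalArgumentExceptions above.
    config.setIndexFieldName("a", "$facets2");
    return config;
  }
}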