Use of org.apache.lucene.facet.FacetsCollector in project lucene-solr by apache.
The class TestSearcherTaxonomyManager, method testDirectory.
public void testDirectory() throws Exception {
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  final IndexWriter w = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
  final DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir);
  // first empty commit
  w.commit();
  tw.commit();
  final SearcherTaxonomyManager mgr = new SearcherTaxonomyManager(indexDir, taxoDir, null);
  final FacetsConfig config = new FacetsConfig();
  config.setMultiValued("field", true);
  final AtomicBoolean stop = new AtomicBoolean();
  // How many unique facets to index before stopping:
  final int ordLimit = TEST_NIGHTLY ? 100000 : 6000;
  Thread indexer = new IndexerThread(w, config, tw, mgr, ordLimit, stop);
  indexer.start();
  try {
    while (!stop.get()) {
      SearcherAndTaxonomy pair = mgr.acquire();
      try {
        //System.out.println("search maxOrd=" + pair.taxonomyReader.getSize());
        FacetsCollector sfc = new FacetsCollector();
        pair.searcher.search(new MatchAllDocsQuery(), sfc);
        Facets facets = getTaxonomyFacetCounts(pair.taxonomyReader, config, sfc);
        FacetResult result = facets.getTopChildren(10, "field");
        if (pair.searcher.getIndexReader().numDocs() > 0) {
          //System.out.println(pair.taxonomyReader.getSize());
          assertTrue(result.childCount > 0);
          assertTrue(result.labelValues.length > 0);
        }
      } finally {
        mgr.release(pair);
      }
    }
  } finally {
    indexer.join();
  }
  if (VERBOSE) {
    System.out.println("TEST: now stop");
  }
  w.close();
  IOUtils.close(mgr, tw, taxoDir, indexDir);
}
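The test's getTaxonomyFacetCounts helper is defined elsewhere in the suite; FastTaxonomyFacetCounts is one concrete implementation it can return. A minimal sketch of the acquire/search/release cycle the test exercises, assuming indexDir and taxoDir already hold a committed index and taxonomy and that "field" is a hypothetical facet dimension:

import org.apache.lucene.facet.FacetResult;
import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.FastTaxonomyFacetCounts;
import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager;
import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager.SearcherAndTaxonomy;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;

public class SearcherTaxonomySketch {
  // Counts the "field" dimension over all documents; indexDir and taxoDir
  // are assumed to already contain a committed index and taxonomy.
  static FacetResult countField(Directory indexDir, Directory taxoDir) throws Exception {
    FacetsConfig config = new FacetsConfig();
    SearcherTaxonomyManager mgr = new SearcherTaxonomyManager(indexDir, taxoDir, null);
    try {
      SearcherAndTaxonomy pair = mgr.acquire();
      try {
        FacetsCollector fc = new FacetsCollector();
        pair.searcher.search(new MatchAllDocsQuery(), fc);
        Facets facets = new FastTaxonomyFacetCounts(pair.taxonomyReader, config, fc);
        return facets.getTopChildren(10, "field");
      } finally {
        // Always release: the manager ref-counts the searcher and taxonomy
        // reader as a unit, which is what keeps them mutually consistent.
        mgr.release(pair);
      }
    } finally {
      mgr.close();
    }
  }
}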
Use of org.apache.lucene.facet.FacetsCollector in project lucene-solr by apache.
The class TestRangeFacetCounts, method testCustomDoubleValuesSource.
public void testCustomDoubleValuesSource() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.addDocument(doc);
  // Test wants 3 docs in one segment:
  writer.forceMerge(1);
  final DoubleValuesSource vs = new DoubleValuesSource() {

    @Override
    public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
      return new DoubleValues() {
        int doc = -1;

        @Override
        public double doubleValue() throws IOException {
          return doc + 1;
        }

        @Override
        public boolean advanceExact(int doc) throws IOException {
          this.doc = doc;
          return true;
        }
      };
    }

    @Override
    public boolean needsScores() {
      return false;
    }

    @Override
    public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
      return Explanation.match(docId + 1, "");
    }
  };
  FacetsConfig config = new FacetsConfig();
  FacetsCollector fc = new FacetsCollector();
  IndexReader r = writer.getReader();
  IndexSearcher s = newSearcher(r);
  s.search(new MatchAllDocsQuery(), fc);
  final DoubleRange[] ranges = new DoubleRange[] {
      new DoubleRange("< 1", 0.0, true, 1.0, false),
      new DoubleRange("< 2", 0.0, true, 2.0, false),
      new DoubleRange("< 5", 0.0, true, 5.0, false),
      new DoubleRange("< 10", 0.0, true, 10.0, false),
      new DoubleRange("< 20", 0.0, true, 20.0, false),
      new DoubleRange("< 50", 0.0, true, 50.0, false) };
  final Query fastMatchFilter;
  final AtomicBoolean filterWasUsed = new AtomicBoolean();
  if (random().nextBoolean()) {
    // Sort of silly:
    final Query in = new MatchAllDocsQuery();
    fastMatchFilter = new UsedQuery(in, filterWasUsed);
  } else {
    fastMatchFilter = null;
  }
  if (VERBOSE) {
    System.out.println("TEST: fastMatchFilter=" + fastMatchFilter);
  }
  Facets facets = new DoubleRangeFacetCounts("field", vs, fc, fastMatchFilter, ranges);
  assertEquals("dim=field path=[] value=3 childCount=6\n  < 1 (0)\n  < 2 (1)\n  < 5 (3)\n  < 10 (3)\n  < 20 (3)\n  < 50 (3)\n",
      facets.getTopChildren(10, "field").toString());
  assertTrue(fastMatchFilter == null || filterWasUsed.get());
  DrillDownQuery ddq = new DrillDownQuery(config);
  ddq.add("field", ranges[1].getQuery(fastMatchFilter, vs));
  // Test simple drill-down:
  assertEquals(1, s.search(ddq, 10).totalHits);
  // Test drill-sideways after drill-down
  DrillSideways ds = new DrillSideways(s, config, (TaxonomyReader) null) {

    @Override
    protected Facets buildFacetsResult(FacetsCollector drillDowns, FacetsCollector[] drillSideways, String[] drillSidewaysDims) throws IOException {
      assert drillSideways.length == 1;
      return new DoubleRangeFacetCounts("field", vs, drillSideways[0], fastMatchFilter, ranges);
    }

    @Override
    protected boolean scoreSubDocsAtOnce() {
      return random().nextBoolean();
    }
  };
  DrillSidewaysResult dsr = ds.search(ddq, 10);
  assertEquals(1, dsr.hits.totalHits);
  assertEquals("dim=field path=[] value=3 childCount=6\n  < 1 (0)\n  < 2 (1)\n  < 5 (3)\n  < 10 (3)\n  < 20 (3)\n  < 50 (3)\n",
      dsr.facets.getTopChildren(10, "field").toString());
  writer.close();
  IOUtils.close(r, dir);
}
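The custom DoubleValuesSource above synthesizes one value per document (docID + 1); more commonly, range counts are computed from an indexed doc-values field. A minimal sketch of that simpler path, assuming a hypothetical "price" field indexed as a DoubleDocValuesField:

import org.apache.lucene.facet.FacetResult;
import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.range.DoubleRange;
import org.apache.lucene.facet.range.DoubleRangeFacetCounts;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;

public class RangeSketch {
  // Buckets the hypothetical "price" field into three half-open ranges.
  static FacetResult priceRanges(IndexSearcher searcher) throws Exception {
    FacetsCollector fc = new FacetsCollector();
    searcher.search(new MatchAllDocsQuery(), fc);
    DoubleRange[] ranges = new DoubleRange[] {
        new DoubleRange("cheap", 0.0, true, 10.0, false),
        new DoubleRange("mid", 10.0, true, 100.0, false),
        new DoubleRange("expensive", 100.0, true, Double.POSITIVE_INFINITY, false) };
    // This constructor reads the field's numeric doc values directly,
    // so no custom DoubleValuesSource is needed:
    Facets facets = new DoubleRangeFacetCounts("price", fc, ranges);
    return facets.getTopChildren(10, "price");
  }
}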
Use of org.apache.lucene.facet.FacetsCollector in project lucene-solr by apache.
The class TestSortedSetDocValuesFacets, method testRandom.
public void testRandom() throws Exception {
  String[] tokens = getRandomTokens(10);
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), indexDir);
  FacetsConfig config = new FacetsConfig();
  int numDocs = atLeast(1000);
  int numDims = TestUtil.nextInt(random(), 1, 7);
  List<TestDoc> testDocs = getRandomDocs(tokens, numDocs, numDims);
  for (TestDoc testDoc : testDocs) {
    Document doc = new Document();
    doc.add(newStringField("content", testDoc.content, Field.Store.NO));
    for (int j = 0; j < numDims; j++) {
      if (testDoc.dims[j] != null) {
        doc.add(new SortedSetDocValuesFacetField("dim" + j, testDoc.dims[j]));
      }
    }
    w.addDocument(config.build(doc));
  }
  // NRT open
  IndexSearcher searcher = newSearcher(w.getReader());
  // Per-top-reader state:
  SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(searcher.getIndexReader());
  ExecutorService exec = randomExecutorServiceOrNull();
  int iters = atLeast(100);
  for (int iter = 0; iter < iters; iter++) {
    String searchToken = tokens[random().nextInt(tokens.length)];
    if (VERBOSE) {
      System.out.println("\nTEST: iter content=" + searchToken);
    }
    FacetsCollector fc = new FacetsCollector();
    FacetsCollector.search(searcher, new TermQuery(new Term("content", searchToken)), 10, fc);
    Facets facets;
    if (exec != null) {
      facets = new ConcurrentSortedSetDocValuesFacetCounts(state, fc, exec);
    } else {
      facets = new SortedSetDocValuesFacetCounts(state, fc);
    }
    // Slow, yet hopefully bug-free, faceting:
    @SuppressWarnings({ "rawtypes", "unchecked" })
    Map<String, Integer>[] expectedCounts = new HashMap[numDims];
    for (int i = 0; i < numDims; i++) {
      expectedCounts[i] = new HashMap<>();
    }
    for (TestDoc doc : testDocs) {
      if (doc.content.equals(searchToken)) {
        for (int j = 0; j < numDims; j++) {
          if (doc.dims[j] != null) {
            Integer v = expectedCounts[j].get(doc.dims[j]);
            if (v == null) {
              expectedCounts[j].put(doc.dims[j], 1);
            } else {
              expectedCounts[j].put(doc.dims[j], v.intValue() + 1);
            }
          }
        }
      }
    }
    List<FacetResult> expected = new ArrayList<>();
    for (int i = 0; i < numDims; i++) {
      List<LabelAndValue> labelValues = new ArrayList<>();
      int totCount = 0;
      for (Map.Entry<String, Integer> ent : expectedCounts[i].entrySet()) {
        labelValues.add(new LabelAndValue(ent.getKey(), ent.getValue()));
        totCount += ent.getValue();
      }
      sortLabelValues(labelValues);
      if (totCount > 0) {
        expected.add(new FacetResult("dim" + i, new String[0], totCount,
            labelValues.toArray(new LabelAndValue[labelValues.size()]), labelValues.size()));
      }
    }
    // Sort by highest value, tie break by value:
    sortFacetResults(expected);
    List<FacetResult> actual = facets.getAllDims(10);
    // Messy: fixup ties
    //sortTies(actual);
    assertEquals(expected, actual);
  }
  if (exec != null) {
    exec.shutdownNow();
  }
  w.close();
  IOUtils.close(searcher.getIndexReader(), indexDir, taxoDir);
}
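Stripped of the randomized verification, the SortedSet pattern the test exercises is short. A minimal sketch, assuming an index whose documents carry SortedSetDocValuesFacetField values for a hypothetical "color" dimension (built through FacetsConfig.build, as above):

import org.apache.lucene.facet.FacetResult;
import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.sortedset.DefaultSortedSetDocValuesReaderState;
import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetCounts;
import org.apache.lucene.facet.sortedset.SortedSetDocValuesReaderState;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;

public class SortedSetSketch {
  // Counts the hypothetical "color" dimension. Unlike the taxonomy variants,
  // no side-car taxonomy index is needed: ordinals live in the doc values.
  static FacetResult countColors(IndexReader reader) throws Exception {
    // Per-top-reader state; building it is costly, so real code caches it
    // for the lifetime of the reader:
    SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(reader);
    IndexSearcher searcher = new IndexSearcher(reader);
    FacetsCollector fc = new FacetsCollector();
    searcher.search(new MatchAllDocsQuery(), fc);
    Facets facets = new SortedSetDocValuesFacetCounts(state, fc);
    return facets.getTopChildren(10, "color");
  }
}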
Use of org.apache.lucene.facet.FacetsCollector in project lucene-solr by apache.
The class TestTaxonomyFacetCounts, method testWrongIndexFieldName.
public void testWrongIndexFieldName() throws Exception {
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  // Writes facet ords to a separate directory from the main index:
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
  FacetsConfig config = new FacetsConfig();
  config.setIndexFieldName("a", "$facets2");
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new FacetField("a", "foo1"));
  writer.addDocument(config.build(taxoWriter, doc));
  // NRT open
  IndexSearcher searcher = newSearcher(writer.getReader());
  // NRT open
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
  FacetsCollector c = new FacetsCollector();
  searcher.search(new MatchAllDocsQuery(), c);
  // Uses default $facets field:
  Facets facets;
  if (random().nextBoolean()) {
    facets = new FastTaxonomyFacetCounts(taxoReader, config, c);
  } else {
    OrdinalsReader ordsReader = new DocValuesOrdinalsReader();
    if (random().nextBoolean()) {
      ordsReader = new CachedOrdinalsReader(ordsReader);
    }
    facets = new TaxonomyFacetCounts(ordsReader, taxoReader, config, c);
  }
  // Ask for top 10 labels for any dims that have counts:
  List<FacetResult> results = facets.getAllDims(10);
  assertTrue(results.isEmpty());
  expectThrows(IllegalArgumentException.class, () -> {
    facets.getSpecificValue("a");
  });
  expectThrows(IllegalArgumentException.class, () -> {
    facets.getTopChildren(10, "a");
  });
  writer.close();
  IOUtils.close(taxoWriter, searcher.getIndexReader(), taxoReader, taxoDir, dir);
}
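The mismatch this test provokes is between the field the ordinals were written to ("$facets2", via setIndexFieldName) and the default "$facets" field the counts read from, so dimension "a" is invisible. A minimal sketch of reading from the correct custom field, using the FastTaxonomyFacetCounts constructor that takes an explicit index field name:

import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.FastTaxonomyFacetCounts;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;

public class CustomFieldSketch {
  // Counts dimension "a", whose ordinals were routed to "$facets2" at index
  // time by config.setIndexFieldName("a", "$facets2").
  static Facets countCustomField(TaxonomyReader taxoReader, FacetsConfig config,
                                 FacetsCollector fc) throws Exception {
    // Passing the index field name explicitly makes the counts read the same
    // field the ordinals were written to:
    return new FastTaxonomyFacetCounts("$facets2", taxoReader, config, fc);
  }
}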
Use of org.apache.lucene.facet.FacetsCollector in project lucene-solr by apache.
The class TestTaxonomyFacetCounts, method testBasic.
public void testBasic() throws Exception {
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  // Writes facet ords to a separate directory from the main index:
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
  FacetsConfig config = new FacetsConfig();
  config.setHierarchical("Publish Date", true);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new FacetField("Author", "Bob"));
  doc.add(new FacetField("Publish Date", "2010", "10", "15"));
  writer.addDocument(config.build(taxoWriter, doc));
  doc = new Document();
  doc.add(new FacetField("Author", "Lisa"));
  doc.add(new FacetField("Publish Date", "2010", "10", "20"));
  writer.addDocument(config.build(taxoWriter, doc));
  doc = new Document();
  doc.add(new FacetField("Author", "Lisa"));
  doc.add(new FacetField("Publish Date", "2012", "1", "1"));
  writer.addDocument(config.build(taxoWriter, doc));
  doc = new Document();
  doc.add(new FacetField("Author", "Susan"));
  doc.add(new FacetField("Publish Date", "2012", "1", "7"));
  writer.addDocument(config.build(taxoWriter, doc));
  doc = new Document();
  doc.add(new FacetField("Author", "Frank"));
  doc.add(new FacetField("Publish Date", "1999", "5", "5"));
  writer.addDocument(config.build(taxoWriter, doc));
  // NRT open
  IndexSearcher searcher = newSearcher(writer.getReader());
  // NRT open
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
  Facets facets = getAllFacets(FacetsConfig.DEFAULT_INDEX_FIELD_NAME, searcher, taxoReader, config);
  // Retrieve & verify results:
  assertEquals("dim=Publish Date path=[] value=5 childCount=3\n  2010 (2)\n  2012 (2)\n  1999 (1)\n",
      facets.getTopChildren(10, "Publish Date").toString());
  assertEquals("dim=Author path=[] value=5 childCount=4\n  Lisa (2)\n  Bob (1)\n  Susan (1)\n  Frank (1)\n",
      facets.getTopChildren(10, "Author").toString());
  // Now user drills down on Publish Date/2010:
  DrillDownQuery q2 = new DrillDownQuery(config);
  q2.add("Publish Date", "2010");
  FacetsCollector c = new FacetsCollector();
  searcher.search(q2, c);
  facets = new FastTaxonomyFacetCounts(taxoReader, config, c);
  assertEquals("dim=Author path=[] value=2 childCount=2\n  Bob (1)\n  Lisa (1)\n",
      facets.getTopChildren(10, "Author").toString());
  assertEquals(1, facets.getSpecificValue("Author", "Lisa"));
  assertNull(facets.getTopChildren(10, "Non exitent dim"));
  // Smoke test PrintTaxonomyStats:
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  PrintTaxonomyStats.printStats(taxoReader, new PrintStream(bos, false, IOUtils.UTF_8), true);
  String result = bos.toString(IOUtils.UTF_8);
  assertTrue(result.indexOf("/Author: 4 immediate children; 5 total categories") != -1);
  assertTrue(result.indexOf("/Publish Date: 3 immediate children; 12 total categories") != -1);
  // Make sure at least a few nodes of the tree came out:
  assertTrue(result.indexOf("/1999") != -1);
  assertTrue(result.indexOf("/2012") != -1);
  assertTrue(result.indexOf("/20") != -1);
  writer.close();
  IOUtils.close(taxoWriter, searcher.getIndexReader(), taxoReader, taxoDir, dir);
}
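The getAllFacets helper called above is defined elsewhere in the test class and is not shown here. A plausible minimal stand-in, under the assumption that it simply matches every document and builds taxonomy counts for the given index field:

import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.FastTaxonomyFacetCounts;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;

public class AllFacetsSketch {
  // Hypothetical stand-in for the test's getAllFacets helper: collect all
  // matching documents, then count facets from the given index field.
  static Facets getAllFacets(String indexFieldName, IndexSearcher searcher,
                             TaxonomyReader taxoReader, FacetsConfig config) throws Exception {
    FacetsCollector fc = new FacetsCollector();
    searcher.search(new MatchAllDocsQuery(), fc);
    return new FastTaxonomyFacetCounts(indexFieldName, taxoReader, config, fc);
  }
}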