Use of org.apache.lucene.facet.LabelAndValue in the Apache lucene-solr project.
From the class ConcurrentSortedSetDocValuesFacetCounts: the method getDim.
// Computes the top-N child labels and counts for one dimension by scanning the
// dimension's child ordinal range. Returns null when no child has a non-zero
// count; otherwise returns a FacetResult with up to topN (label, value) pairs
// in descending count order, the dimension's total count, and the number of
// children with non-zero counts.
private final FacetResult getDim(String dim, OrdRange ordRange, int topN) throws IOException {
// Queue of the current top candidates; lazily created below so the sparse
// (all-zero) case allocates nothing.
TopOrdAndIntQueue q = null;
// Smallest count in a full queue; candidates not exceeding it are skipped.
int bottomCount = 0;
// Running total across all children of this dim.
int dimCount = 0;
// Number of children with a non-zero count.
int childCount = 0;
// Entry object recycled across insertWithOverflow calls to limit allocation.
TopOrdAndIntQueue.OrdAndValue reuse = null;
//System.out.println("getDim : " + ordRange.start + " - " + ordRange.end);
for (int ord = ordRange.start; ord <= ordRange.end; ord++) {
//System.out.println(" ord=" + ord + " count=" + counts[ord]);
// NOTE(review): counts.get(ord) is read several times per iteration; this
// presumes counting has completed before getDim runs, so the reads agree —
// confirm no concurrent updates are possible at this point.
if (counts.get(ord) > 0) {
dimCount += counts.get(ord);
childCount++;
if (counts.get(ord) > bottomCount) {
if (reuse == null) {
reuse = new TopOrdAndIntQueue.OrdAndValue();
}
reuse.ord = ord;
reuse.value = counts.get(ord);
if (q == null) {
// Lazy init, so we don't create this for the
// sparse case unnecessarily
q = new TopOrdAndIntQueue(topN);
}
// insertWithOverflow hands back the displaced (or rejected) entry,
// which becomes the reuse object for the next candidate.
reuse = q.insertWithOverflow(reuse);
if (q.size() == topN) {
// Queue is full: remember the weakest member so cheaper
// candidates can be rejected without touching the queue.
bottomCount = q.top().value;
}
}
}
}
if (q == null) {
// No child of this dim had a non-zero count.
return null;
}
// Drain the queue; pop() yields the smallest entry first, so fill the
// result array from the end to produce descending order.
LabelAndValue[] labelValues = new LabelAndValue[q.size()];
for (int i = labelValues.length - 1; i >= 0; i--) {
TopOrdAndIntQueue.OrdAndValue ordAndValue = q.pop();
final BytesRef term = dv.lookupOrd(ordAndValue.ord);
String[] parts = FacetsConfig.stringToPath(term.utf8ToString());
// parts[0] is the dim itself; parts[1] is the child label component.
labelValues[i] = new LabelAndValue(parts[1], ordAndValue.value);
}
return new FacetResult(dim, new String[0], dimCount, labelValues, childCount);
}
Use of org.apache.lucene.facet.LabelAndValue in the Apache lucene-solr project.
From the class TestRangeFacetCounts: the method testRandomLongs.
// Randomized end-to-end check of LongRangeFacetCounts: indexes random long
// values, builds random ranges (occasionally reusing earlier boundaries so
// ranges share edges), computes expected per-range counts by brute force,
// then verifies both the facet result and drill-down hit counts agree.
public void testRandomLongs() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
int numDocs = atLeast(1000);
if (VERBOSE) {
System.out.println("TEST: numDocs=" + numDocs);
}
// Index each value both as doc values (for faceting) and as a point (for
// the fast-match / drill-down queries), tracking the global min/max.
long[] values = new long[numDocs];
long minValue = Long.MAX_VALUE;
long maxValue = Long.MIN_VALUE;
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
long v = random().nextLong();
values[i] = v;
doc.add(new NumericDocValuesField("field", v));
doc.add(new LongPoint("field", v));
w.addDocument(doc);
minValue = Math.min(minValue, v);
maxValue = Math.max(maxValue, v);
}
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
FacetsConfig config = new FacetsConfig();
int numIters = atLeast(10);
for (int iter = 0; iter < numIters; iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}
int numRange = TestUtil.nextInt(random(), 1, 100);
LongRange[] ranges = new LongRange[numRange];
int[] expectedCounts = new int[numRange];
// Tightest bounds over values actually accepted by any range; used below
// to build a narrower fast-match query variant.
long minAcceptedValue = Long.MAX_VALUE;
long maxAcceptedValue = Long.MIN_VALUE;
for (int rangeID = 0; rangeID < numRange; rangeID++) {
long min;
if (rangeID > 0 && random().nextInt(10) == 7) {
// Use an existing boundary:
LongRange prevRange = ranges[random().nextInt(rangeID)];
if (random().nextBoolean()) {
min = prevRange.min;
} else {
min = prevRange.max;
}
} else {
min = random().nextLong();
}
long max;
if (rangeID > 0 && random().nextInt(10) == 7) {
// Use an existing boundary:
LongRange prevRange = ranges[random().nextInt(rangeID)];
if (random().nextBoolean()) {
max = prevRange.min;
} else {
max = prevRange.max;
}
} else {
max = random().nextLong();
}
// Swap so min <= max before constructing the range.
if (min > max) {
long x = min;
min = max;
max = x;
}
boolean minIncl;
boolean maxIncl;
// NOTE: max - min >= 0 is here to handle the common overflow case!
if (max - min >= 0 && max - min < 2) {
// If max == min or max == min+1, we always do inclusive, else we might pass an empty range and hit exc from LongRange's ctor:
minIncl = true;
maxIncl = true;
} else {
minIncl = random().nextBoolean();
maxIncl = random().nextBoolean();
}
ranges[rangeID] = new LongRange("r" + rangeID, min, minIncl, max, maxIncl);
if (VERBOSE) {
System.out.println(" range " + rangeID + ": " + ranges[rangeID]);
}
// expected count:
// Brute-force: test every indexed value against this range's bounds.
for (int i = 0; i < numDocs; i++) {
boolean accept = true;
if (minIncl) {
accept &= values[i] >= min;
} else {
accept &= values[i] > min;
}
if (maxIncl) {
accept &= values[i] <= max;
} else {
accept &= values[i] < max;
}
if (accept) {
expectedCounts[rangeID]++;
minAcceptedValue = Math.min(minAcceptedValue, values[i]);
maxAcceptedValue = Math.max(maxAcceptedValue, values[i]);
}
}
}
FacetsCollector sfc = new FacetsCollector();
s.search(new MatchAllDocsQuery(), sfc);
// Randomly exercise the fast-match path: null (no filter), a filter over
// all indexed values, or a tighter filter over only accepted values.
Query fastMatchQuery;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchQuery = LongPoint.newRangeQuery("field", minValue, maxValue);
} else {
fastMatchQuery = LongPoint.newRangeQuery("field", minAcceptedValue, maxAcceptedValue);
}
} else {
fastMatchQuery = null;
}
LongValuesSource vs = LongValuesSource.fromLongField("field");
Facets facets = new LongRangeFacetCounts("field", vs, sfc, fastMatchQuery, ranges);
FacetResult result = facets.getTopChildren(10, "field");
assertEquals(numRange, result.labelValues.length);
for (int rangeID = 0; rangeID < numRange; rangeID++) {
if (VERBOSE) {
System.out.println(" range " + rangeID + " expectedCount=" + expectedCounts[rangeID]);
}
LabelAndValue subNode = result.labelValues[rangeID];
assertEquals("r" + rangeID, subNode.label);
assertEquals(expectedCounts[rangeID], subNode.value.intValue());
LongRange range = ranges[rangeID];
// Test drill-down:
// Either via an equivalent point range query or the range's own query.
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
ddq.add("field", LongPoint.newRangeQuery("field", range.min, range.max));
} else {
ddq.add("field", range.getQuery(fastMatchQuery, vs));
}
assertEquals(expectedCounts[rangeID], s.search(ddq, 10).totalHits);
}
}
w.close();
IOUtils.close(r, dir);
}
Use of org.apache.lucene.facet.LabelAndValue in the Apache lucene-solr project.
From the class TestRangeFacetCounts: the method testRandomDoubles.
// Double-valued counterpart of testRandomLongs: indexes random doubles,
// builds random DoubleRanges (occasionally reusing earlier boundaries),
// computes expected counts by brute force, and verifies the facet result
// and drill-down hit counts match.
public void testRandomDoubles() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
int numDocs = atLeast(1000);
// Index each value both as doc values (for faceting) and as a point (for
// the fast-match / drill-down queries), tracking the global min/max.
double[] values = new double[numDocs];
double minValue = Double.POSITIVE_INFINITY;
double maxValue = Double.NEGATIVE_INFINITY;
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
double v = random().nextDouble();
values[i] = v;
doc.add(new DoubleDocValuesField("field", v));
doc.add(new DoublePoint("field", v));
w.addDocument(doc);
minValue = Math.min(minValue, v);
maxValue = Math.max(maxValue, v);
}
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
FacetsConfig config = new FacetsConfig();
int numIters = atLeast(10);
for (int iter = 0; iter < numIters; iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}
int numRange = TestUtil.nextInt(random(), 1, 5);
DoubleRange[] ranges = new DoubleRange[numRange];
int[] expectedCounts = new int[numRange];
// Tightest bounds over values actually accepted by any range; used below
// to build a narrower fast-match filter variant.
double minAcceptedValue = Double.POSITIVE_INFINITY;
double maxAcceptedValue = Double.NEGATIVE_INFINITY;
for (int rangeID = 0; rangeID < numRange; rangeID++) {
double min;
if (rangeID > 0 && random().nextInt(10) == 7) {
// Use an existing boundary:
DoubleRange prevRange = ranges[random().nextInt(rangeID)];
if (random().nextBoolean()) {
min = prevRange.min;
} else {
min = prevRange.max;
}
} else {
min = random().nextDouble();
}
double max;
if (rangeID > 0 && random().nextInt(10) == 7) {
// Use an existing boundary:
DoubleRange prevRange = ranges[random().nextInt(rangeID)];
if (random().nextBoolean()) {
max = prevRange.min;
} else {
max = prevRange.max;
}
} else {
max = random().nextDouble();
}
// Swap so min <= max before constructing the range.
if (min > max) {
double x = min;
min = max;
max = x;
}
boolean minIncl;
boolean maxIncl;
// Detect adjacent/equal doubles via their sortable-long encoding: if the
// bounds are within one ulp of each other, force inclusivity so the
// DoubleRange ctor is not handed an empty range.
long minAsLong = NumericUtils.doubleToSortableLong(min);
long maxAsLong = NumericUtils.doubleToSortableLong(max);
// NOTE: maxAsLong - minAsLong >= 0 is here to handle the common overflow case!
if (maxAsLong - minAsLong >= 0 && maxAsLong - minAsLong < 2) {
minIncl = true;
maxIncl = true;
} else {
minIncl = random().nextBoolean();
maxIncl = random().nextBoolean();
}
ranges[rangeID] = new DoubleRange("r" + rangeID, min, minIncl, max, maxIncl);
// expected count:
// Brute-force: test every indexed value against this range's bounds.
for (int i = 0; i < numDocs; i++) {
boolean accept = true;
if (minIncl) {
accept &= values[i] >= min;
} else {
accept &= values[i] > min;
}
if (maxIncl) {
accept &= values[i] <= max;
} else {
accept &= values[i] < max;
}
if (accept) {
expectedCounts[rangeID]++;
minAcceptedValue = Math.min(minAcceptedValue, values[i]);
maxAcceptedValue = Math.max(maxAcceptedValue, values[i]);
}
}
}
FacetsCollector sfc = new FacetsCollector();
s.search(new MatchAllDocsQuery(), sfc);
// Randomly exercise the fast-match path: null (no filter), a filter over
// all indexed values, or a tighter filter over only accepted values.
Query fastMatchFilter;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchFilter = DoublePoint.newRangeQuery("field", minValue, maxValue);
} else {
fastMatchFilter = DoublePoint.newRangeQuery("field", minAcceptedValue, maxAcceptedValue);
}
} else {
fastMatchFilter = null;
}
DoubleValuesSource vs = DoubleValuesSource.fromDoubleField("field");
Facets facets = new DoubleRangeFacetCounts("field", vs, sfc, fastMatchFilter, ranges);
FacetResult result = facets.getTopChildren(10, "field");
assertEquals(numRange, result.labelValues.length);
for (int rangeID = 0; rangeID < numRange; rangeID++) {
if (VERBOSE) {
System.out.println(" range " + rangeID + " expectedCount=" + expectedCounts[rangeID]);
}
LabelAndValue subNode = result.labelValues[rangeID];
assertEquals("r" + rangeID, subNode.label);
assertEquals(expectedCounts[rangeID], subNode.value.intValue());
DoubleRange range = ranges[rangeID];
// Test drill-down:
// Either via an equivalent point range query or the range's own query.
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
ddq.add("field", DoublePoint.newRangeQuery("field", range.min, range.max));
} else {
ddq.add("field", range.getQuery(fastMatchFilter, vs));
}
assertEquals(expectedCounts[rangeID], s.search(ddq, 10).totalHits);
}
}
w.close();
IOUtils.close(r, dir);
}
Use of org.apache.lucene.facet.LabelAndValue in the Apache lucene-solr project.
From the class TestTaxonomyFacetCounts2: the method testNoParents.
// Verifies facet counts for the dimensions configured without parent rollup
// (CP_C and CP_D): the value reported for each dimension and for each of its
// children must match the independently tracked expected counts.
@Test
public void testNoParents() throws Exception {
DirectoryReader indexReader = DirectoryReader.open(indexDir);
TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
IndexSearcher searcher = newSearcher(indexReader);
FacetsCollector sfc = new FacetsCollector();
searcher.search(new MatchAllDocsQuery(), sfc);
Facets facets = getTaxonomyFacetCounts(taxoReader, getConfig(), sfc);
FacetResult result = facets.getTopChildren(NUM_CHILDREN_CP_C, CP_C);
assertEquals(allExpectedCounts.get(CP_C), result.value);
for (LabelAndValue labelValue : result.labelValues) {
assertEquals(allExpectedCounts.get(CP_C + "/" + labelValue.label), labelValue.value);
}
result = facets.getTopChildren(NUM_CHILDREN_CP_D, CP_D);
// BUG FIX: this result is for CP_D, but it was previously asserted against
// the expected count for CP_C (copy-paste error); compare against CP_D.
assertEquals(allExpectedCounts.get(CP_D), result.value);
for (LabelAndValue labelValue : result.labelValues) {
assertEquals(allExpectedCounts.get(CP_D + "/" + labelValue.label), labelValue.value);
}
IOUtils.close(indexReader, taxoReader);
}
Use of org.apache.lucene.facet.LabelAndValue in the Apache lucene-solr project.
From the class TestTaxonomyFacetCounts: the method testSegmentsWithoutCategoriesOrResults.
// Verifies the taxonomy facet accumulator copes with segments that contribute
// no hits and/or no categories. Merging is disabled so each indexTwoDocs call
// produces its own segment with a known content/category combination.
public void testSegmentsWithoutCategoriesOrResults() throws Exception {
// tests the accumulator when there are segments with no results
Directory indexDir = newDirectory();
Directory taxoDir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
// prevent merges
iwc.setMergePolicy(NoMergePolicy.INSTANCE);
IndexWriter indexWriter = new IndexWriter(indexDir, iwc);
TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
FacetsConfig config = new FacetsConfig();
// 1st segment, no content, with categories
indexTwoDocs(taxoWriter, indexWriter, config, false);
// 2nd segment, with content, no categories
indexTwoDocs(taxoWriter, indexWriter, null, true);
// 3rd segment ok
indexTwoDocs(taxoWriter, indexWriter, config, true);
// 4th segment, no content, or categories
indexTwoDocs(taxoWriter, indexWriter, null, false);
// 5th segment, with content, no categories
indexTwoDocs(taxoWriter, indexWriter, null, true);
// 6th segment, with content, with categories
indexTwoDocs(taxoWriter, indexWriter, config, true);
// 7th segment, with content, no categories
indexTwoDocs(taxoWriter, indexWriter, null, true);
indexWriter.close();
IOUtils.close(taxoWriter);
DirectoryReader indexReader = DirectoryReader.open(indexDir);
TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
IndexSearcher indexSearcher = newSearcher(indexReader);
// search for "f:a", only segments 1 and 3 should match results
Query q = new TermQuery(new Term("f", "a"));
FacetsCollector sfc = new FacetsCollector();
indexSearcher.search(q, sfc);
Facets facets = getTaxonomyFacetCounts(taxoReader, config, sfc);
FacetResult result = facets.getTopChildren(10, "A");
// Only the segments indexed with both content and categories contribute
// facet counts for dimension "A".
assertEquals("wrong number of children", 2, result.labelValues.length);
for (LabelAndValue labelValue : result.labelValues) {
assertEquals("wrong weight for child " + labelValue.label, 2, labelValue.value.intValue());
}
IOUtils.close(indexReader, taxoReader, indexDir, taxoDir);
}
Aggregations