Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.
Class TestDrillSideways, method testRandom.
public void testRandom() throws Exception {
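  // Note: aChance/bChance/cChance are double fields on the enclosing test class
  // (not shown in this excerpt); they weight how often each content token is
  // generated by randomContentToken().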
  while (aChance == 0.0) {
    aChance = random().nextDouble();
  }
  while (bChance == 0.0) {
    bChance = random().nextDouble();
  }
  while (cChance == 0.0) {
    cChance = random().nextDouble();
  }
  //aChance = .01;
  //bChance = 0.5;
  //cChance = 1.0;
  double sum = aChance + bChance + cChance;
  aChance /= sum;
  bChance /= sum;
  cChance /= sum;
  int numDims = TestUtil.nextInt(random(), 2, 5);
  //int numDims = 3;
  int numDocs = atLeast(3000);
  //int numDocs = 20;
  if (VERBOSE) {
    System.out.println("numDims=" + numDims + " numDocs=" + numDocs + " aChance=" + aChance + " bChance=" + bChance + " cChance=" + cChance);
  }
  String[][] dimValues = new String[numDims][];
  int valueCount = 2;
  for (int dim = 0; dim < numDims; dim++) {
    Set<String> values = new HashSet<>();
    while (values.size() < valueCount) {
      String s = TestUtil.randomRealisticUnicodeString(random());
      //String s = _TestUtil.randomString(random());
      if (s.length() > 0) {
        values.add(s);
      }
    }
    dimValues[dim] = values.toArray(new String[values.size()]);
    valueCount *= 2;
  }
  List<Doc> docs = new ArrayList<>();
  for (int i = 0; i < numDocs; i++) {
    Doc doc = new Doc();
    doc.id = "" + i;
    doc.contentToken = randomContentToken(false);
    doc.dims = new int[numDims];
    doc.dims2 = new int[numDims];
    for (int dim = 0; dim < numDims; dim++) {
      if (random().nextInt(5) == 3) {
        // This doc is missing this dim:
        doc.dims[dim] = -1;
      } else if (dimValues[dim].length <= 4) {
        int dimUpto = 0;
        doc.dims[dim] = dimValues[dim].length - 1;
        while (dimUpto < dimValues[dim].length) {
          if (random().nextBoolean()) {
            doc.dims[dim] = dimUpto;
            break;
          }
          dimUpto++;
        }
      } else {
        doc.dims[dim] = random().nextInt(dimValues[dim].length);
      }
      if (random().nextInt(5) == 3) {
        // 2nd value:
        doc.dims2[dim] = random().nextInt(dimValues[dim].length);
      } else {
        doc.dims2[dim] = -1;
      }
    }
    docs.add(doc);
  }
  Directory d = newDirectory();
  Directory td = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setInfoStream(InfoStream.NO_OUTPUT);
  RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
  DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(td, IndexWriterConfig.OpenMode.CREATE);
  FacetsConfig config = new FacetsConfig();
  for (int i = 0; i < numDims; i++) {
    config.setMultiValued("dim" + i, true);
  }
  boolean doUseDV = random().nextBoolean();
  for (Doc rawDoc : docs) {
    Document doc = new Document();
    doc.add(newStringField("id", rawDoc.id, Field.Store.YES));
    doc.add(new SortedDocValuesField("id", new BytesRef(rawDoc.id)));
    doc.add(newStringField("content", rawDoc.contentToken, Field.Store.NO));
    if (VERBOSE) {
      System.out.println(" doc id=" + rawDoc.id + " token=" + rawDoc.contentToken);
    }
    for (int dim = 0; dim < numDims; dim++) {
      int dimValue = rawDoc.dims[dim];
      if (dimValue != -1) {
        if (doUseDV) {
          doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue]));
        } else {
          doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue]));
        }
        doc.add(new StringField("dim" + dim, dimValues[dim][dimValue], Field.Store.YES));
        if (VERBOSE) {
          System.out.println(" dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue]));
        }
      }
      int dimValue2 = rawDoc.dims2[dim];
      if (dimValue2 != -1) {
        if (doUseDV) {
          doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue2]));
        } else {
          doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue2]));
        }
        doc.add(new StringField("dim" + dim, dimValues[dim][dimValue2], Field.Store.YES));
        if (VERBOSE) {
          System.out.println(" dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue2]));
        }
      }
    }
    w.addDocument(config.build(tw, doc));
  }
  if (random().nextBoolean()) {
    // Randomly delete a few docs:
    int numDel = TestUtil.nextInt(random(), 1, (int) (numDocs * 0.05));
    if (VERBOSE) {
      System.out.println("delete " + numDel);
    }
    int delCount = 0;
    while (delCount < numDel) {
      Doc doc = docs.get(random().nextInt(docs.size()));
      if (!doc.deleted) {
        if (VERBOSE) {
          System.out.println(" delete id=" + doc.id);
        }
        doc.deleted = true;
        w.deleteDocuments(new Term("id", doc.id));
        delCount++;
      }
    }
  }
  if (random().nextBoolean()) {
    if (VERBOSE) {
      System.out.println("TEST: forceMerge(1)...");
    }
    w.forceMerge(1);
  }
  IndexReader r = w.getReader();
  final SortedSetDocValuesReaderState sortedSetDVState;
  IndexSearcher s = newSearcher(r);
  if (doUseDV) {
    sortedSetDVState = new DefaultSortedSetDocValuesReaderState(s.getIndexReader());
  } else {
    sortedSetDVState = null;
  }
  if (VERBOSE) {
    System.out.println("r.numDocs() = " + r.numDocs());
  }
  // NRT open
  TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
  int numIters = atLeast(10);
  for (int iter = 0; iter < numIters; iter++) {
    String contentToken = random().nextInt(30) == 17 ? null : randomContentToken(true);
    int numDrillDown = TestUtil.nextInt(random(), 1, Math.min(4, numDims));
    if (VERBOSE) {
      System.out.println("\nTEST: iter=" + iter + " baseQuery=" + contentToken + " numDrillDown=" + numDrillDown + " useSortedSetDV=" + doUseDV);
    }
    String[][] drillDowns = new String[numDims][];
    int count = 0;
    boolean anyMultiValuedDrillDowns = false;
    while (count < numDrillDown) {
      int dim = random().nextInt(numDims);
      if (drillDowns[dim] == null) {
        if (random().nextBoolean()) {
          // Drill down on one value:
          drillDowns[dim] = new String[] { dimValues[dim][random().nextInt(dimValues[dim].length)] };
        } else {
          int orCount = TestUtil.nextInt(random(), 1, Math.min(5, dimValues[dim].length));
          drillDowns[dim] = new String[orCount];
          anyMultiValuedDrillDowns |= orCount > 1;
          for (int i = 0; i < orCount; i++) {
            while (true) {
              String value = dimValues[dim][random().nextInt(dimValues[dim].length)];
              for (int j = 0; j < i; j++) {
                if (value.equals(drillDowns[dim][j])) {
                  value = null;
                  break;
                }
              }
              if (value != null) {
                drillDowns[dim][i] = value;
                break;
              }
            }
          }
        }
        if (VERBOSE) {
          BytesRef[] values = new BytesRef[drillDowns[dim].length];
          for (int i = 0; i < values.length; i++) {
            values[i] = new BytesRef(drillDowns[dim][i]);
          }
          System.out.println(" dim" + dim + "=" + Arrays.toString(values));
        }
        count++;
      }
    }
    Query baseQuery;
    if (contentToken == null) {
      baseQuery = new MatchAllDocsQuery();
    } else {
      baseQuery = new TermQuery(new Term("content", contentToken));
    }
    DrillDownQuery ddq = new DrillDownQuery(config, baseQuery);
    for (int dim = 0; dim < numDims; dim++) {
      if (drillDowns[dim] != null) {
        for (String value : drillDowns[dim]) {
          ddq.add("dim" + dim, value);
        }
      }
    }
    Query filter;
    if (random().nextInt(7) == 6) {
      if (VERBOSE) {
        System.out.println(" only-even filter");
      }
      filter = new Query() {
        @Override
        public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
          return new ConstantScoreWeight(this, boost) {
            @Override
            public Scorer scorer(LeafReaderContext context) throws IOException {
              DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
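              // The approximation cheaply accepts every doc in the segment;
              // matches() below then does the expensive stored-field lookup to
              // keep only even-numbered ids.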
              return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(approximation) {
                @Override
                public boolean matches() throws IOException {
                  int docID = approximation.docID();
                  return (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0;
                }

                @Override
                public float matchCost() {
                  return 1000f;
                }
              });
            }
          };
        }

        @Override
        public String toString(String field) {
          return "drillSidewaysTestFilter";
        }

        @Override
        public boolean equals(Object o) {
          return o == this;
        }

        @Override
        public int hashCode() {
          return System.identityHashCode(this);
        }
      };
    } else {
      filter = null;
    }
    // Verify docs are always collected in order. If we
    // had an AssertingScorer it could catch it when
    // Weight.scoresDocsOutOfOrder lies!:
    getNewDrillSideways(s, config, tr).search(ddq, new SimpleCollector() {
      int lastDocID;

      @Override
      public void collect(int doc) {
        assert doc > lastDocID;
        lastDocID = doc;
      }

      @Override
      protected void doSetNextReader(LeafReaderContext context) throws IOException {
        lastDocID = -1;
      }

      @Override
      public boolean needsScores() {
        return false;
      }
    });
    // subScorers are on the same docID:
    if (!anyMultiValuedDrillDowns) {
      // Can only do this test when there are no OR'd
      // drill-down values, because in that case it's
      // easily possible for one of the DD terms to be on
      // a future docID:
      getNewDrillSidewaysScoreSubdocsAtOnce(s, config, tr).search(ddq, new AssertingSubDocsAtOnceCollector());
    }
    TestFacetResult expected = slowDrillSidewaysSearch(s, docs, contentToken, drillDowns, dimValues, filter);
    Sort sort = new Sort(new SortField("id", SortField.Type.STRING));
    DrillSideways ds;
    if (doUseDV) {
      ds = getNewDrillSideways(s, config, sortedSetDVState);
    } else {
      ds = getNewDrillSidewaysBuildFacetsResult(s, config, tr);
    }
    // Retrieve all facets:
    DrillSidewaysResult actual = ds.search(ddq, filter, null, numDocs, sort, true, true);
    TopDocs hits = s.search(baseQuery, numDocs);
    Map<String, Float> scores = new HashMap<>();
    for (ScoreDoc sd : hits.scoreDocs) {
      scores.put(s.doc(sd.doc).get("id"), sd.score);
    }
    if (VERBOSE) {
      System.out.println(" verify all facets");
    }
    verifyEquals(dimValues, s, expected, actual, scores, doUseDV);
    // Make sure drill down doesn't change score:
    Query q = ddq;
    if (filter != null) {
      q = new BooleanQuery.Builder().add(q, Occur.MUST).add(filter, Occur.FILTER).build();
    }
    TopDocs ddqHits = s.search(q, numDocs);
    assertEquals(expected.hits.size(), ddqHits.totalHits);
    for (int i = 0; i < expected.hits.size(); i++) {
      // Score should be IDENTICAL:
      assertEquals(scores.get(expected.hits.get(i).id), ddqHits.scoreDocs[i].score, 0.0f);
    }
  }
  w.close();
  IOUtils.close(r, tr, tw, d, td);
}
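The anonymous filter above shows the canonical two-phase shape: a cheap approximation enumerates candidate docs, and matches() runs the expensive per-document check only on those candidates. A minimal sketch of that contract, assuming the same Lucene 6.x/7.0-era API as the snippets on this page; expensiveCheck is a hypothetical placeholder, not a real Lucene method:

// Inside a Weight.scorer(LeafReaderContext context) implementation:
DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
  @Override
  public boolean matches() throws IOException {
    // Called only when the approximation is positioned on a candidate doc.
    return expensiveCheck(approximation.docID()); // hypothetical predicate
  }

  @Override
  public float matchCost() {
    // Rough relative cost of one matches() call; used to order two-phase
    // checks cheapest-first when several iterators are intersected.
    return 100f;
  }
};
return new ConstantScoreScorer(this, score(), twoPhase);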
Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.
Class PhraseHelper, method getTermToSpans.
// code extracted & refactored from WSTE.extractWeightedSpanTerms()
private void getTermToSpans(SpanQuery spanQuery, LeafReaderContext readerContext, int doc, Map<BytesRef, Spans> result) throws IOException {
  // note: in WSTE there was some field-specific looping that seemed pointless, so it isn't replicated here.
  final IndexSearcher searcher = new IndexSearcher(readerContext.reader());
  searcher.setQueryCache(null);
  if (willRewrite) {
    // searcher.rewrite loops till done
    spanQuery = (SpanQuery) searcher.rewrite(spanQuery);
  }
  // Get the underlying query terms, sorted so we can loop over results in order shortly...
  TreeSet<Term> termSet = new FieldFilteringTermSet();
  // needsScores == false
  searcher.createWeight(spanQuery, false, 1.0f).extractTerms(termSet);
  // Get Spans by running the query against the reader
  // TODO it might make sense to re-use/cache the Spans instance, to advance forward between docs
  SpanWeight spanWeight = (SpanWeight) searcher.createNormalizedWeight(spanQuery, false);
  Spans spans = spanWeight.getSpans(readerContext, SpanWeight.Postings.POSITIONS);
  if (spans == null) {
    return;
  }
  TwoPhaseIterator twoPhaseIterator = spans.asTwoPhaseIterator();
  if (twoPhaseIterator != null) {
    if (twoPhaseIterator.approximation().advance(doc) != doc || !twoPhaseIterator.matches()) {
      return;
    }
  } else if (spans.advance(doc) != doc) {
    // position to this doc, and return doing nothing if none is found
    return;
  }
  // Consume the Spans into a cache. This instance is used as a source for multiple cloned copies.
  // It's important we do this and not re-use the same original Spans instance since these will be iterated
  // independently later on; sometimes in ways that prevent sharing the original Spans.
  // consumes spans for this doc only and caches
  CachedSpans cachedSpansSource = new CachedSpans(spans);
  // we don't use it below
  spans = null;
  // Map terms to a Spans instance (aggregate if necessary)
  for (final Term queryTerm : termSet) {
    // The spanQuery list was already filtered by these conditions.
    if (positionInsensitiveTerms.contains(queryTerm)) {
      continue;
    }
    // copy-constructor refers to same data (shallow) but has iteration state from the beginning
    CachedSpans cachedSpans = new CachedSpans(cachedSpansSource);
    // Add the span to whatever span may or may not exist
    Spans existingSpans = result.get(queryTerm.bytes());
    if (existingSpans != null) {
      if (existingSpans instanceof MultiSpans) {
        ((MultiSpans) existingSpans).addSpans(cachedSpans);
      } else {
        // upgrade to MultiSpans
        MultiSpans multiSpans = new MultiSpans();
        multiSpans.addSpans(existingSpans);
        multiSpans.addSpans(cachedSpans);
        result.put(queryTerm.bytes(), multiSpans);
      }
    } else {
      result.put(queryTerm.bytes(), cachedSpans);
    }
  }
}
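PhraseHelper only needs a single document, so it advances the approximation straight to doc and confirms with matches(). Over a whole segment, the same two-phase view is consumed with a loop; a hedged sketch, assuming a Scorer obtained from Weight.scorer(context):

TwoPhaseIterator tpi = scorer.twoPhaseIterator();
if (tpi != null) {
  DocIdSetIterator approx = tpi.approximation();
  for (int d = approx.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = approx.nextDoc()) {
    if (tpi.matches()) {
      // d is a confirmed match
    }
  }
} else {
  // No two-phase view: every doc the iterator returns is already a match.
  DocIdSetIterator disi = scorer.iterator();
  for (int d = disi.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = disi.nextDoc()) {
    // d is a match
  }
}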
Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.
Class IntersectsRPTVerifyQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  final Map valueSourceContext = ValueSource.newContext(searcher);
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      // Compute approx & exact
      final IntersectsDifferentiatingQuery.IntersectsDifferentiatingVisitor result = intersectsDiffQuery.compute(context);
      if (result.approxDocIdSet == null) {
        return null;
      }
      final DocIdSetIterator approxDISI = result.approxDocIdSet.iterator();
      if (approxDISI == null) {
        return null;
      }
      final DocIdSetIterator exactIterator;
      if (result.exactDocIdSet != null) {
        // If both sets are the same, there's nothing to verify; we needn't return a TwoPhaseIterator
        if (result.approxDocIdSet == result.exactDocIdSet) {
          return new ConstantScoreScorer(this, score(), approxDISI);
        }
        exactIterator = result.exactDocIdSet.iterator();
        assert exactIterator != null;
      } else {
        exactIterator = null;
      }
      final FunctionValues predFuncValues = predicateValueSource.getValues(valueSourceContext, context);
      final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator(approxDISI) {
        @Override
        public boolean matches() throws IOException {
          final int doc = approxDISI.docID();
          if (exactIterator != null) {
            if (exactIterator.docID() < doc) {
              exactIterator.advance(doc);
            }
            if (exactIterator.docID() == doc) {
              return true;
            }
          }
          return predFuncValues.boolVal(doc);
        }

        @Override
        public float matchCost() {
          // TODO: use cost of exactIterator.advance() and predFuncValues.boolVal()
          return 100;
        }
      };
      return new ConstantScoreScorer(this, score(), twoPhaseIterator);
    }
  };
}
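Note the tiering in matches(): documents already confirmed by the exact set are accepted without further work, and only the remaining approximated candidates fall through to the per-document predicate, so the expensive spatial verification runs just on the boundary cases.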
Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.
Class SortedNumericDocValuesRangeQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      SortedNumericDocValues values = getValues(context.reader(), field);
      if (values == null) {
        return null;
      }
      final NumericDocValues singleton = DocValues.unwrapSingleton(values);
      final TwoPhaseIterator iterator;
      if (singleton != null) {
        iterator = new TwoPhaseIterator(singleton) {
          @Override
          public boolean matches() throws IOException {
            final long value = singleton.longValue();
            return value >= lowerValue && value <= upperValue;
          }

          @Override
          public float matchCost() {
            // 2 comparisons
            return 2;
          }
        };
      } else {
        iterator = new TwoPhaseIterator(values) {
          @Override
          public boolean matches() throws IOException {
            for (int i = 0, count = values.docValueCount(); i < count; ++i) {
              final long value = values.nextValue();
              if (value < lowerValue) {
                continue;
              }
              // Values are sorted, so the first value that is >= lowerValue is our best candidate
              return value <= upperValue;
            }
            // all values were < lowerValue
            return false;
          }

          @Override
          public float matchCost() {
            // 2 comparisons
            return 2;
          }
        };
      }
      return new ConstantScoreScorer(this, score(), iterator);
    }
  };
}
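Queries of this shape are driven entirely by doc values, so the field must be indexed as such. A hedged usage sketch; SortedNumericDocValuesField#newSlowRangeQuery is the public entry point in recent Lucene versions (7+), so treat its availability as an assumption for your version:

Document doc = new Document();
doc.add(new SortedNumericDocValuesField("price", 42L));
doc.add(new SortedNumericDocValuesField("price", 120L));
writer.addDocument(doc); // 'writer' is an IndexWriter assumed to be in scope
Query range = SortedNumericDocValuesField.newSlowRangeQuery("price", 0L, 100L);
// The doc above matches because one of its values (42) falls within [0, 100].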
Use of org.apache.lucene.search.TwoPhaseIterator in project pyramid by cheng-li.
Class CustomConjunctionSpans, method asTwoPhaseIterator.
/**
 * Return a {@link TwoPhaseIterator} view of this ConjunctionSpans.
 */
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
  float totalMatchCost = 0;
  // Compute the matchCost as the total matchCost/positionsCost of the sub spans.
  for (Spans spans : subSpans) {
    TwoPhaseIterator tpi = spans.asTwoPhaseIterator();
    if (tpi != null) {
      totalMatchCost += tpi.matchCost();
    } else {
      totalMatchCost += spans.positionsCost();
    }
  }
  final float matchCost = totalMatchCost;
  return new TwoPhaseIterator(conjunction) {
    @Override
    public boolean matches() throws IOException {
      return twoPhaseCurrentDocMatches();
    }

    @Override
    public float matchCost() {
      return matchCost;
    }
  };
}
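Summing the sub-spans' match costs gives the conjunction a rough per-document verification cost; where a sub-span exposes no two-phase view, its positionsCost() stands in. Searchers use matchCost() to run the cheapest verification first when intersecting several two-phase iterators.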