Use of org.apache.lucene.index.IndexReaderContext in project lucene-solr by apache.
The count method of the LongRangeFacetCounts class.
private void count(LongValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
  LongRange[] ranges = (LongRange[]) this.ranges;
  LongRangeCounter counter = new LongRangeCounter(ranges);
  int missingCount = 0;
  for (MatchingDocs hits : matchingDocs) {
    LongValues fv = valueSource.getValues(hits.context, null);
    totCount += hits.totalHits;
    final DocIdSetIterator fastMatchDocs;
    if (fastMatchQuery != null) {
      // Run the fast-match query against the same top-level view as the
      // hits, with query caching disabled:
      final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
      final IndexSearcher searcher = new IndexSearcher(topLevelContext);
      searcher.setQueryCache(null);
      final Weight fastMatchWeight = searcher.createNormalizedWeight(fastMatchQuery, false);
      Scorer s = fastMatchWeight.scorer(hits.context);
      if (s == null) {
        continue;
      }
      fastMatchDocs = s.iterator();
    } else {
      fastMatchDocs = null;
    }
    DocIdSetIterator docs = hits.bits.iterator();
    for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
      if (fastMatchDocs != null) {
        // Only count docs that the fast-match query also accepts:
        int fastMatchDoc = fastMatchDocs.docID();
        if (fastMatchDoc < doc) {
          fastMatchDoc = fastMatchDocs.advance(doc);
        }
        if (doc != fastMatchDoc) {
          doc = docs.advance(fastMatchDoc);
          continue;
        }
      }
      // Skip missing docs:
      if (fv.advanceExact(doc)) {
        counter.add(fv.longValue());
      } else {
        missingCount++;
      }
      doc = docs.nextDoc();
    }
  }
  missingCount += counter.fillCounts(counts);
  totCount -= missingCount;
}
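This private method is driven through the facets API rather than called directly. The following is a minimal, hypothetical driver showing one way the counting path above gets exercised; the field name "timestamp" and the range bounds are illustrative assumptions, not taken from the snippet.

static FacetResult countTimestampRanges(IndexSearcher searcher) throws IOException {
  // Requires org.apache.lucene.facet.*, org.apache.lucene.facet.range.*,
  // and org.apache.lucene.search.* on the classpath.
  FacetsCollector fc = new FacetsCollector();
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
  long now = System.currentTimeMillis() / 1000;
  // The constructor runs count(...) over the collected MatchingDocs:
  Facets facets = new LongRangeFacetCounts("timestamp", fc,
      new LongRange("past hour", now - 3600, true, now, true),
      new LongRange("past day", now - 86400, true, now, true));
  return facets.getTopChildren(10, "timestamp");
}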
Use of org.apache.lucene.index.IndexReaderContext in project lucene-solr by apache.
The createWeight method of the TermAutomatonQuery class.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  IndexReaderContext context = searcher.getTopReaderContext();
  Map<Integer, TermContext> termStates = new HashMap<>();
  for (Map.Entry<BytesRef, Integer> ent : termToID.entrySet()) {
    if (ent.getKey() != null) {
      termStates.put(ent.getValue(), TermContext.build(context, new Term(field, ent.getKey())));
    }
  }
  return new TermAutomatonWeight(det, searcher, termStates, boost);
}
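TermContext.build walks every leaf of the given top-level context once and caches the per-segment term state, pre-aggregating index statistics along the way. A hedged sketch of what that buys you; the field and term values here are assumptions:

static void inspectTerm(IndexSearcher searcher) throws IOException {
  // Hypothetical field/term; any indexed term works.
  Term term = new Term("body", "lucene");
  IndexReaderContext top = searcher.getTopReaderContext();
  TermContext tc = TermContext.build(top, term);
  // Statistics are already summed across all segments:
  int docFreq = tc.docFreq();
  long totalTermFreq = tc.totalTermFreq();
  System.out.println(term + " df=" + docFreq + " ttf=" + totalTermFreq);
}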
Use of org.apache.lucene.index.IndexReaderContext in project lucene-solr by apache.
The testSpanNearScorerSkipTo1 method of the TestNearSpansOrdered class.
/**
 * Not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems.
 */
public void testSpanNearScorerSkipTo1() throws Exception {
  SpanNearQuery q = makeQuery();
  Weight w = searcher.createNormalizedWeight(q, true);
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  LeafReaderContext leave = topReaderContext.leaves().get(0);
  Scorer s = w.scorer(leave);
  assertEquals(1, s.iterator().advance(1));
}
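The test drives a single leaf by hand; in general a Weight produces one Scorer per segment of the top-level context. A minimal sketch of the full per-leaf loop the test shortcuts, assuming a searcher and query are in scope:

static void scoreAllLeaves(IndexSearcher searcher, Query query) throws IOException {
  Weight w = searcher.createNormalizedWeight(query, true);  // needsScores = true
  for (LeafReaderContext leaf : searcher.getTopReaderContext().leaves()) {
    Scorer scorer = w.scorer(leaf);
    if (scorer == null) {
      continue;  // query matches nothing in this segment
    }
    DocIdSetIterator it = scorer.iterator();
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      // doc is segment-local; add leaf.docBase for the index-wide docID
      System.out.println((leaf.docBase + doc) + " score=" + scorer.score());
    }
  }
}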
Use of org.apache.lucene.index.IndexReaderContext in project lucene-solr by apache.
The createWeight method of the SpanTermQuery class.
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  final TermContext context;
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  if (termContext == null || termContext.wasBuiltFor(topContext) == false) {
    // Rebuild the per-segment term states if the cached ones were built
    // against a different (e.g. reopened) top-level reader context:
    context = TermContext.build(topContext, term);
  } else {
    context = termContext;
  }
  return new SpanTermWeight(context, searcher, needsScores ? Collections.singletonMap(term, context) : null, boost);
}
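The cached TermContext lets each per-leaf weight seek straight to the term without a second term-dictionary lookup. A hedged sketch of that resolution step; the method name and structure are hypothetical, built only from the TermContext/TermsEnum API:

static void visitPostings(IndexReaderContext topContext, Term term) throws IOException {
  TermContext tc = TermContext.build(topContext, term);
  for (LeafReaderContext leaf : topContext.leaves()) {
    TermState state = tc.get(leaf.ord);
    if (state == null) {
      continue;  // term does not occur in this segment
    }
    TermsEnum te = leaf.reader().terms(term.field()).iterator();
    te.seekExact(term.bytes(), state);  // positioned without re-searching the dictionary
    PostingsEnum postings = te.postings(null);
    // iterate postings.nextDoc() as needed...
  }
}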
Use of org.apache.lucene.index.IndexReaderContext in project lucene-solr by apache.
The doFieldSortValues method of the QueryComponent class.
protected void doFieldSortValues(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException {
  SolrQueryRequest req = rb.req;
  SolrQueryResponse rsp = rb.rsp;
  // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't
  // currently have an option to return sort field values.  Because of this, we
  // take the documents given and re-derive the sort values.
  //
  // TODO: See SOLR-5595
  boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES, false);
  if (fsv) {
    // order is important for the sort fields
    NamedList<Object[]> sortVals = new NamedList<>();
    IndexReaderContext topReaderContext = searcher.getTopReaderContext();
    List<LeafReaderContext> leaves = topReaderContext.leaves();
    LeafReaderContext currentLeaf = null;
    if (leaves.size() == 1) {
      // if there is a single segment, use that subReader and avoid looking up each time
      currentLeaf = leaves.get(0);
      leaves = null;
    }
    DocList docList = rb.getResults().docList;
    // sort ids from lowest to highest so we can access them in order
    int nDocs = docList.size();
    final long[] sortedIds = new long[nDocs];
    // doc scores, parallel to sortedIds
    final float[] scores = new float[nDocs];
    DocIterator it = docList.iterator();
    for (int i = 0; i < nDocs; i++) {
      sortedIds[i] = (((long) it.nextDoc()) << 32) | i;
      scores[i] = docList.hasScores() ? it.score() : Float.NaN;
    }
    // sort ids and scores together
    new InPlaceMergeSorter() {
      @Override
      protected void swap(int i, int j) {
        long tmpId = sortedIds[i];
        float tmpScore = scores[i];
        sortedIds[i] = sortedIds[j];
        scores[i] = scores[j];
        sortedIds[j] = tmpId;
        scores[j] = tmpScore;
      }

      @Override
      protected int compare(int i, int j) {
        return Long.compare(sortedIds[i], sortedIds[j]);
      }
    }.sort(0, sortedIds.length);

    SortSpec sortSpec = rb.getSortSpec();
    Sort sort = searcher.weightSort(sortSpec.getSort());
    SortField[] sortFields = sort == null ? new SortField[] { SortField.FIELD_SCORE } : sort.getSort();
    List<SchemaField> schemaFields = sortSpec.getSchemaFields();

    for (int fld = 0; fld < schemaFields.size(); fld++) {
      SchemaField schemaField = schemaFields.get(fld);
      FieldType ft = null == schemaField ? null : schemaField.getType();
      SortField sortField = sortFields[fld];
      SortField.Type type = sortField.getType();
      // :TODO: would be simpler to always serialize every position of SortField[]
      if (type == SortField.Type.SCORE || type == SortField.Type.DOC) continue;

      FieldComparator<?> comparator = sortField.getComparator(1, 0);
      LeafFieldComparator leafComparator = null;
      Object[] vals = new Object[nDocs];

      int lastIdx = -1;
      int idx = 0;

      for (int i = 0; i < sortedIds.length; ++i) {
        long idAndPos = sortedIds[i];
        float score = scores[i];
        int doc = (int) (idAndPos >>> 32);
        int position = (int) idAndPos;

        if (leaves != null) {
          idx = ReaderUtil.subIndex(doc, leaves);
          currentLeaf = leaves.get(idx);
          if (idx != lastIdx) {
            // we switched segments.  invalidate leafComparator.
            lastIdx = idx;
            leafComparator = null;
          }
        }

        if (leafComparator == null) {
          leafComparator = comparator.getLeafComparator(currentLeaf);
        }

        doc -= currentLeaf.docBase;  // adjust for what segment this is in
        leafComparator.setScorer(new FakeScorer(doc, score));
        leafComparator.copy(0, doc);
        Object val = comparator.value(0);
        if (null != ft) val = ft.marshalSortValue(val);
        vals[position] = val;
      }

      sortVals.add(sortField.getField(), vals);
    }

    rsp.add("sort_values", sortVals);
  }
}
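The core trick in the loop above is mapping an index-wide docID back to its leaf and a segment-local docID. Isolated as a hedged sketch, assuming only a searcher and a global docID:

static int toLocal(IndexSearcher searcher, int globalDoc) {
  List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
  int idx = ReaderUtil.subIndex(globalDoc, leaves);  // binary search over docBase
  LeafReaderContext leaf = leaves.get(idx);
  return globalDoc - leaf.docBase;  // segment-local docID, valid against leaf.reader()
}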